Commit: setting cache to nltk_cache

Files changed:
- Dockerfile (+3, -3)
- app/engine/vectorstore.py (+2, -6)
Dockerfile
CHANGED
@@ -11,8 +11,6 @@ COPY ./app /app
|
|
11 |
WORKDIR /app
|
12 |
RUN mkdir /data
|
13 |
|
14 |
-
ENV TRANSFORMERS_CACHE=/data/cache
|
15 |
-
RUN mkdir /data/cache
|
16 |
|
17 |
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
18 |
# ^ no caching of the packages to save space
|
@@ -21,6 +19,8 @@ RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
|
21 |
# ^ to fix runtime error, see https://github.com/run-llama/llama_index/issues/10681
|
22 |
# it didn't work, I had to do chmod below (as also suggested in the article)
|
23 |
|
24 |
-
RUN chmod -R 777 /usr/local/lib/python3.10/site-packages
|
|
|
|
|
25 |
|
26 |
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
|
|
11 |
WORKDIR /app
|
12 |
RUN mkdir /data
|
13 |
|
|
|
|
|
14 |
|
15 |
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
16 |
# ^ no caching of the packages to save space
|
|
|
19 |
# ^ to fix runtime error, see https://github.com/run-llama/llama_index/issues/10681
|
20 |
# it didn't work, I had to do chmod below (as also suggested in the article)
|
21 |
|
22 |
+
RUN chmod -R 777 /usr/local/lib/python3.10/site-packages/llama_index/legacy/_static/nltk_cache
|
23 |
+
|
24 |
+
ENV TRANSFORMERS_CACHE=/usr/local/lib/python3.10/site-packages/llama_index/legacy/_static/nltk_cache
|
25 |
|
26 |
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
app/engine/vectorstore.py
CHANGED
@@ -57,11 +57,8 @@ class VectorStore:
|
|
57 |
self.model_path = model_path
|
58 |
|
59 |
try:
|
60 |
-
print("We were here")
|
61 |
-
print("URL1", os.environ.get('FINRAG_WEAVIATE_ENDPOINT'))
|
62 |
self.api_key = os.environ.get('FINRAG_WEAVIATE_API_KEY')
|
63 |
self.url = os.environ.get('FINRAG_WEAVIATE_ENDPOINT')
|
64 |
-
print('Before client creation')
|
65 |
self.client = WeaviateWCS(endpoint=self.url,
|
66 |
api_key=self.api_key,
|
67 |
model_name_or_path=self.model_path)
|
@@ -71,9 +68,8 @@ class VectorStore:
|
|
71 |
# raise Exception(f"Could not create Weaviate client: {e}")
|
72 |
print(f"Could not create Weaviate client: {e}")
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
# assert self.client._client.is_ready(), "Weaviate is not ready"
|
77 |
# careful with accessing '_client' since the weaviate helper usually closes the connection every time
|
78 |
|
79 |
self.indexer = None
|
|
|
57 |
self.model_path = model_path
|
58 |
|
59 |
try:
|
|
|
|
|
60 |
self.api_key = os.environ.get('FINRAG_WEAVIATE_API_KEY')
|
61 |
self.url = os.environ.get('FINRAG_WEAVIATE_ENDPOINT')
|
|
|
62 |
self.client = WeaviateWCS(endpoint=self.url,
|
63 |
api_key=self.api_key,
|
64 |
model_name_or_path=self.model_path)
|
|
|
68 |
# raise Exception(f"Could not create Weaviate client: {e}")
|
69 |
print(f"Could not create Weaviate client: {e}")
|
70 |
|
71 |
+
assert self.client._client.is_live(), "Weaviate is not live"
|
72 |
+
assert self.client._client.is_ready(), "Weaviate is not ready"
|
|
|
73 |
# careful with accessing '_client' since the weaviate helper usually closes the connection every time
|
74 |
|
75 |
self.indexer = None
|