JPBianchi committed on
Commit
e77c4bd
1 Parent(s): b88503a

added nltk_data creation

Files changed (1)
  1. Dockerfile +13 -13
Dockerfile CHANGED
@@ -19,9 +19,21 @@ RUN apt-get update && apt-get install -y \
     wget \
     git git-lfs \
     bash \
-    && rm -rf /var/lib/apt/lists/*
+    && rm -rf /var/lib/apt/lists/*2
 # reflex needs unzip curl
 
+RUN mkdir -p /nltk_data
+RUN chmod -R 777 /nltk_data
+
+RUN chmod -R 777 /usr/local/lib/python3.10/site-packages/llama_index/legacy/_static/nltk_cache
+RUN mkdir -p /.cache
+RUN chmod -R 777 /.cache
+
+ENV TRANSFORMERS_CACHE=/usr/local/lib/python3.10/site-packages/llama_index/legacy/_static/nltk_cache
+# ^ not elegant but it works
+# HF warning says that TRANSFORMERS_CACHE will be deprecated in transformers v5, and advise to use HF_HOME
+
+
 RUN pip install --no-cache-dir --upgrade -r app/requirements.txt
 # ^ no caching of the packages to save space
 RUN pip install --no-cache-dir --upgrade -r requirements.txt
@@ -36,16 +48,4 @@ RUN guardrails hub install hub://guardrails/detect_pii
 RUN guardrails hub install hub://guardrails/qa_relevance_llm_eval
 
 
-# RUN python -c "import nltk; nltk.download('stopwords')"
-# ^ to fix runtime error, see https://github.com/run-llama/llama_index/issues/10681
-# it didn't work, I had to do chmod below (as also suggested in the article)
-
-RUN chmod -R 777 /usr/local/lib/python3.10/site-packages/llama_index/legacy/_static/nltk_cache
-RUN mkdir -p /.cache
-RUN chmod -R 777 /.cache
-
-ENV TRANSFORMERS_CACHE=/usr/local/lib/python3.10/site-packages/llama_index/legacy/_static/nltk_cache
-# ^ not elegant but it works
-# HF warning says that TRANSFORMERS_CACHE will be deprecated in transformers v5, and advise to use HF_HOME
-
 CMD ["uvicorn", "app.main_reflex:app", "--host", "0.0.0.0", "--port", "7860"]
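
Context on the commit message: /nltk_data is created and made world-writable, presumably so that NLTK corpora can be downloaded at runtime by the non-root user the container runs as, instead of writing into site-packages. Below is a minimal sketch of how application code could point NLTK at that directory; the /nltk_data path and the stopwords corpus come from this diff, but the call site and the exact corpora the app needs are assumptions, since the application code is not part of this commit.

import os

NLTK_DIR = "/nltk_data"  # created and chmod 777 in the Dockerfile above
os.environ.setdefault("NLTK_DATA", NLTK_DIR)  # must be set before importing nltk

import nltk

nltk.data.path.append(NLTK_DIR)  # also covers the case where NLTK_DATA was set too late
nltk.download("stopwords", download_dir=NLTK_DIR, quiet=True)  # corpus name is an assumption, taken from the removed comment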
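On the TRANSFORMERS_CACHE comment: the deprecation warning it mentions points at HF_HOME, which acts as the cache root for huggingface_hub and transformers (hub downloads land under $HF_HOME/hub). In the Dockerfile the swap would simply be an ENV HF_HOME=... line; the sketch below shows the same thing done in-process, assuming the same directory is kept as the cache root. Whether anything else in the app depends on the exact TRANSFORMERS_CACHE path is not visible from this diff.

import os

CACHE_ROOT = "/usr/local/lib/python3.10/site-packages/llama_index/legacy/_static/nltk_cache"

# What the Dockerfile sets today (deprecated in Transformers v5):
#   ENV TRANSFORMERS_CACHE=<CACHE_ROOT>
# Suggested replacement: set HF_HOME before transformers / huggingface_hub are imported,
# since the cache location is resolved from the environment at import time.
os.environ.setdefault("HF_HOME", CACHE_ROOT)

import transformers  # picks up HF_HOME when resolving its cache directory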