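# Set up camenduru's text-generation-webui (v1.2) in the Colab runtime at /content.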
%cd /content
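# aria2 is used below for fast, multi-connection downloads of the model files.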
!apt-get -y install -qq aria2
!git clone -b v1.2 https://github.com/camenduru/text-generation-webui
%cd /content/text-generation-webui
!pip install -r requirements.txt
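# Pin gradio to the version this webui release expects.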
!pip install -U gradio==3.28.3
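
# Clone and build the GPTQ-for-LLaMa CUDA kernels needed to load 4-bit quantized models.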
!mkdir /content/text-generation-webui/repositories
%cd /content/text-generation-webui/repositories
!git clone -b v1.2 https://github.com/camenduru/GPTQ-for-LLaMa.git
%cd GPTQ-for-LLaMa
!python setup_cuda.py install
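
# Download the gpt4-x-alpaca-13b 4-bit (group size 128) model files into the models directory.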
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/4bit/gpt4-x-alpaca-13b-native-4bit-128g-cuda/raw/main/config.json -d /content/text-generation-webui/models/gpt4-x-alpaca-13b-native-4bit-128g-cuda -o config.json
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/4bit/gpt4-x-alpaca-13b-native-4bit-128g-cuda/raw/main/generation_config.json -d /content/text-generation-webui/models/gpt4-x-alpaca-13b-native-4bit-128g-cuda -o generation_config.json
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/4bit/gpt4-x-alpaca-13b-native-4bit-128g-cuda/raw/main/special_tokens_map.json -d /content/text-generation-webui/models/gpt4-x-alpaca-13b-native-4bit-128g-cuda -o special_tokens_map.json
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/4bit/gpt4-x-alpaca-13b-native-4bit-128g-cuda/resolve/main/tokenizer.model -d /content/text-generation-webui/models/gpt4-x-alpaca-13b-native-4bit-128g-cuda -o tokenizer.model
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/4bit/gpt4-x-alpaca-13b-native-4bit-128g-cuda/raw/main/tokenizer_config.json -d /content/text-generation-webui/models/gpt4-x-alpaca-13b-native-4bit-128g-cuda -o tokenizer_config.json
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/4bit/gpt4-x-alpaca-13b-native-4bit-128g-cuda/resolve/main/4bit-128g.safetensors -d /content/text-generation-webui/models/gpt4-x-alpaca-13b-native-4bit-128g-cuda -o 4bit-128g.safetensors
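
# Launch the web UI in chat mode with a public share link, loading the model as 4-bit GPTQ (LLaMA architecture).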
%cd /content/text-generation-webui
!python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama