Update README.md
Browse files
README.md
CHANGED
@@ -44,18 +44,18 @@ quantized_by: Second State Inc.
**Before:**

- Run as LlamaEdge service

  ```bash
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:gemma-7b-it-Q5_K_M.gguf \
    llama-api-server.wasm \
    --prompt-template gemma-instruct \
    --ctx-size 3072 \
    --model-name gemma-1.1-7b
  ```

  ```bash
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:gemma-7b-it-Q5_K_M.gguf llama-chat.wasm -p gemma-instruct -c
  ```

## Quantized GGUF Models
**After:**

- Run as LlamaEdge service

  ```bash
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:gemma-1.1-7b-it-Q5_K_M.gguf \
    llama-api-server.wasm \
    --prompt-template gemma-instruct \
    --ctx-size 3072 \
    --model-name gemma-1.1-7b
  ```

- Run as LlamaEdge command app

  ```bash
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:gemma-1.1-7b-it-Q5_K_M.gguf llama-chat.wasm -p gemma-instruct -c 3072
  ```

## Quantized GGUF Models