doof-ferb committed
Commit 554cb7e
1 Parent(s): 0cfd9d3

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model-bf16.gguf filter=lfs diff=lfs merge=lfs -text
+ model-f16.gguf filter=lfs diff=lfs merge=lfs -text
+ model-f32.gguf filter=lfs diff=lfs merge=lfs -text
+ model-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+ model-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text
+ model-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+ model-q5_k_s.gguf filter=lfs diff=lfs merge=lfs -text
+ model-q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+ model-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ license: cc-by-4.0
+ base_model: BAAI/bge-m3
+ language: ["vi"]
+ library_name: sentence-transformers
+ pipeline_tag: sentence-similarity
+ inference: false
+ ---
+
+ # `BAAI/bge-m3` in GGUF format
+
+ original: https://huggingface.co/BAAI/bge-m3
+
+ quantization:
+ ```bash
+ REL=b3827 # can change to a later release
+ # download a pre-built llama.cpp release plus the matching source tree
+ wget https://github.com/ggerganov/llama.cpp/releases/download/$REL/llama-$REL-bin-ubuntu-x64.zip --content-disposition --continue &> /dev/null
+ wget https://github.com/ggerganov/llama.cpp/archive/refs/tags/$REL.zip --content-disposition --continue &> /dev/null
+ unzip -q llama-$REL-bin-ubuntu-x64.zip
+ unzip -q llama.cpp-$REL.zip
+ mv llama.cpp-$REL/* .
+ rm -r llama.cpp-$REL/ llama-$REL-bin-ubuntu-x64.zip llama.cpp-$REL.zip
+ pip install -q -r requirements.txt
+
+ # convert the original checkpoint to a full-precision (f32) GGUF
+ rm -rf models/tmp/
+ git clone --depth=1 --single-branch https://huggingface.co/BAAI/bge-m3 models/tmp
+ python convert_hf_to_gguf.py models/tmp/ --outfile model-f32.gguf --outtype f32
+
+ # derive the lower-precision variants from the f32 master
+ build/bin/llama-quantize model-f32.gguf model-f16.gguf f16 2> /dev/null
+ build/bin/llama-quantize model-f32.gguf model-bf16.gguf bf16 2> /dev/null
+ build/bin/llama-quantize model-f32.gguf model-q8_0.gguf q8_0 2> /dev/null
+ build/bin/llama-quantize model-f32.gguf model-q6_k.gguf q6_k 2> /dev/null
+ build/bin/llama-quantize model-f32.gguf model-q5_k_m.gguf q5_k_m 2> /dev/null
+ build/bin/llama-quantize model-f32.gguf model-q5_k_s.gguf q5_k_s 2> /dev/null
+ build/bin/llama-quantize model-f32.gguf model-q4_k_m.gguf q4_k_m 2> /dev/null
+ build/bin/llama-quantize model-f32.gguf model-q4_k_s.gguf q4_k_s 2> /dev/null
+
+ # gather the outputs and upload them to the Hub
+ rm -rf models/yolo/
+ mkdir -p models/yolo
+ mv model-*.gguf models/yolo/
+ touch models/yolo/README.md
+ huggingface-cli upload bge-m3-gguf models/yolo .
+ ```
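+
+ the quantized variants can be spot-checked before the final `mv`/upload step above (while the `model-*.gguf` files are still in the working directory); a minimal sketch that only reuses commands already shown in this card:
+ ```bash
+ # print the first bytes of an embedding from every variant, just to confirm each file loads
+ for f in model-*.gguf; do
+   echo "== $f"
+   build/bin/llama-embedding -m "$f" -p "sanity check" --embd-output-format array 2> /dev/null | head -c 120; echo
+ done
+ ```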
+
+ usage:
+ ```bash
+ build/bin/llama-embedding -m model-q5_k_m.gguf -p "Cô ấy cười nói suốt cả ngày" --embd-output-format array 2> /dev/null  # Vietnamese test sentence: "She laughed and chatted all day long"
+ # OR
+ build/bin/llama-server --embedding -c 128 -m model-q5_k_m.gguf
+ ```
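+
+ when serving via `llama-server`, embeddings can then be requested over HTTP; a minimal sketch, assuming the server's default address `127.0.0.1:8080` and its OpenAI-compatible `/v1/embeddings` route (adjust host, port, or route if your build differs):
+ ```bash
+ curl -s http://127.0.0.1:8080/v1/embeddings \
+   -H "Content-Type: application/json" \
+   -d '{"input": "Cô ấy cười nói suốt cả ngày"}'
+ ```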
model-bf16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:874b570fa474e29c3f6bfee48efa4f2f89ed5c0025fe881726e97ef661768ea2
+ size 1157671200
model-f16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daec91ffb5dd0c27411bd71f29932917c49cf529a641d0168496c3a501e3062c
+ size 1157671200
model-f32.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d925235eecc1c73cbbac5a268e6e8562c82bf341ca36f1c7465bdfd316e63518
+ size 2273655072
model-q4_k_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fe3a00f9e92cc07f9745c69d25e17e234e1a7f8cde6e7ebe930dc7516e9accf
+ size 437778496
model-q4_k_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f141d3fdc26f5aab6e18712ba89841f948d1f668b1c3e20ca8bd1dbdd34030b
+ size 423655488
model-q5_k_m.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94b52ea5a93d9c9246a4e10cc8efb113267364451d57f59218f258b08966f536
+ size 467662912
model-q5_k_s.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90a9c29e7a70806c64f22ae277e4aad5766f7f9e118adfee9137b09fd5789dec
+ size 459307072
model-q6_k.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df937a62eea9acbc6990f249558101c545931d72e4aa4e8a4cdbd462895b21b3
+ size 499415104
model-q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:950f4a8e5e19477a6d3c26d2f162233c20002c601f75e4b002e3239997821167
+ size 634553760