TensoraCO loubnabnl HF staff committed on
Commit
5e2d95e
0 Parent(s):

Duplicate from bigcode/santa-explains-code

Browse files

Co-authored-by: Loubna Ben Allal <[email protected]>

Files changed (4) hide show
  1. .gitattributes +31 -0
  2. README.md +14 -0
  3. app.py +75 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.model filter=lfs diff=lfs merge=lfs -text
11
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
12
+ *.npy filter=lfs diff=lfs merge=lfs -text
13
+ *.npz filter=lfs diff=lfs merge=lfs -text
14
+ *.onnx filter=lfs diff=lfs merge=lfs -text
15
+ *.ot filter=lfs diff=lfs merge=lfs -text
16
+ *.parquet filter=lfs diff=lfs merge=lfs -text
17
+ *.pickle filter=lfs diff=lfs merge=lfs -text
18
+ *.pkl filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pt filter=lfs diff=lfs merge=lfs -text
21
+ *.pth filter=lfs diff=lfs merge=lfs -text
22
+ *.rar filter=lfs diff=lfs merge=lfs -text
23
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
24
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
25
+ *.tflite filter=lfs diff=lfs merge=lfs -text
26
+ *.tgz filter=lfs diff=lfs merge=lfs -text
27
+ *.wasm filter=lfs diff=lfs merge=lfs -text
28
+ *.xz filter=lfs diff=lfs merge=lfs -text
29
+ *.zip filter=lfs diff=lfs merge=lfs -text
30
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
31
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Santa Explains Code
3
+ emoji: 🎅
4
+ colorFrom: red
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ sdk_version: 3.0.24
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: bigcode/santa-explains-code
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed, pipeline
3
+
4
+
5
# UI copy shown at the top of the Gradio app.
title = "🎅 Santa Explains Code"
description = "This space converts Python code into English text that explains its function using [SantaCoder-Code-To-Text](https://huggingface.co/loubnabnl/santacoder-code-to-text), \
a code generation model that was fine-tuned on the [github-jupyter-code-to-text](https://huggingface.co/datasets/codeparrot/github-jupyter-code-to-text) dataset. \
This dataset includes Python code accompanied by docstrings that explain it. The data was sourced from Jupyter notebooks.\n\n\
Limitations: The model was fine-tuned on a small dataset from Jupyter Notebooks, so it can only explain simple, \
common functions that are found in these notebooks, in a similar fashion to the text in markdown cells. It might also be sensitive to function names and comments."

# Sample snippets for the Examples panel. All of them are valid Python so that
# users see realistic inputs (verified with compile() in review).
EXAMPLE_0 = "def function(sequence):\n return [x for x in sequence if x % 2 == 0]"
EXAMPLE_1 = "from sklearn import model_selection\nX_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=0.2)"
# Fixed: original was missing the ':' after the def, named the parameter `file`
# while the body read `filename`, and did not indent the with-block body.
EXAMPLE_2 = "def load_text(filename):\n    with open(filename, 'r') as f:\n        text = f.read()\n    return text"
EXAMPLE_3 = "net.zero_grad()\nloss.backward()"
# Fixed: original read "\n\nnoptimizer.step()" (stray 'n' typo).
EXAMPLE_4 = "net.zero_grad()\nloss.backward()\n\noptimizer.step()"
# Fixed: original flattened all nesting to one space, which is not valid Python.
EXAMPLE_5 = "def sort_function(arr):\n    n = len(arr)\n\n    # Traverse through all array elements\n    for i in range(n):\n\n        # Last i elements are already in place\n        for j in range(0, n-i-1):\n\n            # traverse the array from 0 to n-i-1\n            # Swap if the element found is greater\n            # than the next element\n            if arr[j] > arr[j+1]:\n                arr[j], arr[j+1] = arr[j+1], arr[j]"

# Each example row matches the Interface inputs:
# [code, max_tokens, temperature, seed]
example = [
    [EXAMPLE_0, 32, 0.6, 42],
    [EXAMPLE_1, 34, 0.4, 42],
    [EXAMPLE_2, 11, 0.6, 42],
    [EXAMPLE_3, 30, 0.6, 42],
    [EXAMPLE_4, 46, 0.6, 42],
    [EXAMPLE_5, 32, 0.6, 42],
]
27
+
28
+
29
# Load the fine-tuned SantaCoder code-to-text checkpoint from the Hub.
# NOTE(review): trust_remote_code=True executes Python shipped inside the model
# repo — acceptable here because the checkpoint is the Space author's own, but
# re-verify if the model id ever changes.
tokenizer = AutoTokenizer.from_pretrained("loubnabnl/santacoder-code-to-text")
model = AutoModelForCausalLM.from_pretrained("loubnabnl/santacoder-code-to-text", trust_remote_code=True)
# Module-level generation pipeline, reused for every request.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
32
+
33
+ def make_doctring(gen_prompt):
34
+ return gen_prompt + f"\n\n\"\"\"\nExplanation:"
35
+
36
+ def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
37
+ set_seed(seed)
38
+ prompt = make_doctring(gen_prompt)
39
+ generated_text = pipe(prompt, do_sample=True, top_p=0.95, temperature=temperature, max_new_tokens=max_tokens)[0]['generated_text']
40
+ return generated_text
41
+
42
+
43
# Gradio UI: Python code in, generated explanation out.
# Uses the modern gradio 3.x component API (gr.Slider with `value=`) for
# consistency with gr.Textbox above, instead of the deprecated
# gr.inputs.Slider(..., default=...) legacy namespace.
iface = gr.Interface(
    fn=code_generation,
    inputs=[
        gr.Textbox(lines=10, label="Python code"),
        gr.Slider(
            minimum=8,
            maximum=256,
            step=1,
            value=8,
            label="Number of tokens to generate",
        ),
        gr.Slider(
            minimum=0,
            maximum=2.5,
            step=0.1,
            value=0.6,
            label="Temperature",
        ),
        gr.Slider(
            minimum=0,
            maximum=1000,
            step=1,
            value=42,
            label="Random seed to use for the generation",
        ),
    ],
    outputs=gr.Textbox(label="Predicted explanation", lines=10),
    examples=example,
    layout="horizontal",
    description=description,
    title=title,
)
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ transformers==4.19.0
2
+ torch==1.11.0