Update README.md
README.md CHANGED
@@ -38,6 +38,7 @@ You can use the models through Huggingface's Transformers library. Check our Git
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
 import torch
+import torchaudio
 import re
 from string import Template
 prompt_template = Template("Human: ${inst} </s> Assistant: ")
@@ -79,7 +80,7 @@ abc_pattern = r'(X:\d+\n(?:[^\n]*\n)+)'
 abc_notation = re.findall(abc_pattern, response+'\n')[0]
 s = Score.from_abc(abc_notation)
 audio = Synthesizer().render(s, stereo=True)
-
+torchaudio.save('cm_music_piece.wav', torch.FloatTensor(audio), 44100)
 ```
 
 ## Chat demo
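For readers unfamiliar with the call added in this commit, here is a minimal, self-contained sketch of what the new `torchaudio.save` line does. The placeholder waveform (one second of stereo silence, with an assumed `(channels, frames)` shape and `float32` dtype) merely stands in for the audio that `Synthesizer().render(s, stereo=True)` produces in the README example; the output file name and sample rate are taken from the diff above.

```python
import numpy as np
import torch
import torchaudio

# Placeholder: one second of stereo silence at 44.1 kHz, standing in for the
# rendered audio array from the README example (shape/dtype assumed here).
audio = np.zeros((2, 44100), dtype=np.float32)

# torchaudio.save expects a (channels, frames) float tensor plus the sample rate,
# which is what the newly added line passes.
torchaudio.save('cm_music_piece.wav', torch.FloatTensor(audio), 44100)
```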
|