Update README.md
Browse files
README.md
CHANGED
@@ -30,14 +30,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name,
|
|
30 |
torch_dtype=torch.bfloat16,
|
31 |
)
|
32 |
|
33 |
-
|
34 |
-
|
35 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name,add_bos_token=True,trust_remote_code=True)
|
36 |
-
|
37 |
-
model_input = tokenizer(eval_prompt, return_tensors="pt").to("cuda")
|
38 |
-
|
39 |
-
model_to_save.config.use_cache = True
|
40 |
-
|
41 |
def stream(user_prompt):
|
42 |
runtimeFlag = "cuda:0"
|
43 |
system_prompt = 'The following is an excerpt from MODULAR_MOJO from the section on roadmap.'
|
@@ -47,5 +40,11 @@ def stream(user_prompt):
|
|
47 |
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
|
48 |
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=200)
|
49 |
|
50 |
-
stream("
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
```
|
|
|
30 |
torch_dtype=torch.bfloat16,
|
31 |
)
|
32 |
|
33 |
+
model.config.use_cache = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
def stream(user_prompt):
|
35 |
runtimeFlag = "cuda:0"
|
36 |
system_prompt = 'The following is an excerpt from MODULAR_MOJO from the section on roadmap.'
|
|
|
40 |
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
|
41 |
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=200)
|
42 |
|
43 |
+
stream("""can you translate this python code to mojo to make more performant making T as struct?
|
44 |
+
class T():
|
45 |
+
def __init__(self, v: float):
|
46 |
+
self.value=v
|
47 |
+
|
48 |
+
def sum_objects(a:T,b:T)->T:
|
49 |
+
return T(a.value + b.value)""")
|
50 |
```
|