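To run the example below, first load the model and tokenizer. A minimal sketch using unsloth's FastLanguageModel; the model name comes from this card, while max_seq_length, dtype, and 4-bit loading are assumptions:

from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "ping98k/gemma-2b-cross-0.1",
    max_seq_length = 2048,  # assumption: not stated on this card
    dtype = None,           # auto-detect
    load_in_4bit = True,    # assumption: 4-bit loading to fit small GPUs
)
FastLanguageModel.for_inference(model)  # enable faster inference
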
from unsloth.chat_templates import get_chat_template


tokenizer = get_chat_template(
    tokenizer,
    chat_template = "chatml", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth
    mapping = {"role" : "role", "content" : "content", "user" : "user", "assistant" : "assistant","system":"system"}, # ShareGPT style
    map_eos_token = True, # Maps <|im_end|> to </s> instead
)
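
Before generating, you can sanity-check how the template renders a conversation by skipping tokenization; the exact special tokens you see depend on the chatml template and the map_eos_token setting above:

chat = [
    {"role": "system", "content": "[Role:Translator] [Language:Thai]"},
    {"role": "user", "content": "Hello"},
]
print(tokenizer.apply_chat_template(chat, tokenize = False, add_generation_prompt = True))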

def ask(text):
    system_prompts = [
        "[Role:Translator] [Language:English]",
        "[Role:Translator] [Language:Thai]",
        "[Role:Assistant] [Language:English]",
        "[Role:Assistant] [Language:Thai]",
    ]
    # Build one two-turn conversation per system prompt so all four
    # behaviours can be generated in a single batch.
    chats = [
        [
            {"role": "system", "content": system},
            {"role": "user", "content": text},
        ]
        for system in system_prompts
    ]

    # The four prompts can tokenize to different lengths, so pad on the
    # left (decoder-only model) and keep the attention mask.
    tokenizer.padding_side = "left"
    inputs = tokenizer.apply_chat_template(
        chats,
        add_generation_prompt = True,
        tokenize = True,
        padding = True,
        return_dict = True,
        return_tensors = "pt",
    ).to("cuda")

    outputs = model.generate(
        input_ids = inputs["input_ids"],
        attention_mask = inputs["attention_mask"],
        max_new_tokens = 64,
        use_cache = True,
    )
    # Drop the prompt tokens and decode only the generated continuations.
    decoded = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens = True)

    for system, answer in zip(system_prompts, decoded):
        print(f"========================={system}=========================")
        print(answer)
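
Calling ask prints the model's reply under each of the four system prompts; for example, with a hypothetical input (output varies with the weights and decoding settings):

ask("How do I get to the train station?")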
   