---
license: llama3
language:
  - tr
pipeline_tag: text-generation
---

## Usage

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


model_id = "Trendyol/Trendyol-LLM-8b-chat-v2.0"

pipe = pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={
        "torch_dtype": torch.bfloat16,
        "use_cache": True,
        "use_flash_attention_2": True,  # requires the flash-attn package
    },
    device_map="auto",
)

# Llama-3-based chat models mark the end of a turn with <|eot_id|>,
# so treat it as a stop token alongside the regular eos_token_id.
terminators = [
    pipe.tokenizer.eos_token_id,
    pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

sampling_params = dict(do_sample=True, temperature=0.3, top_k=50, top_p=0.9, repetition_penalty=1.1)

# Turkish for: "You are a helpful assistant and will try to produce the best
# answer in line with the instructions given to you."
DEFAULT_SYSTEM_PROMPT = "Sen yardımsever bir asistansın ve sana verilen talimatlar doğrultusunda en iyi cevabı üretmeye çalışacaksın."

def generate_output(user_query, sys_prompt=DEFAULT_SYSTEM_PROMPT):
    messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": user_query}
    ]

    outputs = pipe(
        messages,
        max_new_tokens=1024,
        eos_token_id=terminators,
        return_full_text=False,
        **sampling_params
    )

    return outputs[0]["generated_text"]

# Example query, Turkish for "How many provinces are there in Turkey?"
response = generate_output("Türkiye'de kaç il var?")
print(response)
```
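
If you prefer direct control over tokenization and generation instead of the `pipeline` wrapper, a minimal sketch using the `AutoModelForCausalLM` and `AutoTokenizer` classes imported above might look like the following. The sampling values simply mirror `sampling_params` above and are illustrative, not an official recipe:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Trendyol/Trendyol-LLM-8b-chat-v2.0"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "system", "content": "Sen yardımsever bir asistansın ve sana verilen talimatlar doğrultusunda en iyi cevabı üretmeye çalışacaksın."},
    {"role": "user", "content": "Türkiye'de kaç il var?"},
]

# Render the chat template and append the generation prompt.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output_ids = model.generate(
    input_ids,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.3,
    top_k=50,
    top_p=0.9,
    repetition_penalty=1.1,
    # Stop on <|eot_id|> as well, matching the terminators list above.
    eos_token_id=[tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")],
)

# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

Both paths stop on `<|eot_id|>`, the end-of-turn token used by Llama-3-based chat models.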