ayoubkirouane committed
Commit 576c685
1 Parent(s): 101e210

Update README.md

Files changed (1)
  1. README.md +6 -7
README.md CHANGED
@@ -66,28 +66,27 @@ from peft import PeftModel, PeftConfig
  from transformers import AutoModelForCausalLM, BitsAndBytesConfig
  import torch
 
- #config = PeftConfig.from_pretrained("ayoubkirouane/Llama2_13B_startup_hf")
  bnb_config = BitsAndBytesConfig(
      load_in_4bit=True,
      bnb_4bit_quant_type="nf4",
      bnb_4bit_compute_dtype=getattr(torch, "float16"),
      bnb_4bit_use_double_quant=False)
  model = AutoModelForCausalLM.from_pretrained(
-     "meta-llama/Llama-2-7b-hf",
+     "meta-llama/Llama-2-13b-chat-hf",
      quantization_config=bnb_config,
      device_map={"": 0})
  model.config.use_cache = False
  model.config.pretraining_tp = 1
- model = PeftModel.from_pretrained(model, "TuningAI/Llama2_7B_Cover_letter_generator")
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", trust_remote_code=True)
+ model = PeftModel.from_pretrained(model, "TuningAI/Llama2_13B_startup_Assistant")
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf", trust_remote_code=True)
  tokenizer.pad_token = tokenizer.eos_token
  tokenizer.padding_side = "right"
- Instruction = "Given a user's information about the target job, you will generate a Cover letter for this job based on this information."
+ system_message = "Given a user's startup-related question in English, you will generate a thoughtful answer in English."
  while 1:
      input_text = input(">>>")
      logging.set_verbosity(logging.CRITICAL)
-     prompt = f"### Instruction\n{Instruction}.\n ###Input \n\n{input_text}. ### Output:"
-     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400)
+     prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n {input_text}. [/INST]"
+     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=512)
      result = pipe(prompt)
      print(result[0]['generated_text'].replace(prompt, ''))
  ```
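
For reference, the updated snippet is sketched below as one self-contained program. The hunk above shows only part of the README, so the `AutoTokenizer`, `pipeline`, and `logging` imports from `transformers` are assumed here, along with installed `peft`, `bitsandbytes`, and `accelerate` packages, access to the gated `meta-llama/Llama-2-13b-chat-hf` weights, and a CUDA GPU at index 0.

```python
import torch
from peft import PeftModel
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    logging,
    pipeline,
)

# 4-bit NF4 quantization so the 13B model fits in a single GPU's memory.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=False,
)

# Load the quantized base model onto GPU 0, then attach the LoRA adapter.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-chat-hf",
    quantization_config=bnb_config,
    device_map={"": 0},
)
model.config.use_cache = False
model.config.pretraining_tp = 1
model = PeftModel.from_pretrained(model, "TuningAI/Llama2_13B_startup_Assistant")

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"

system_message = (
    "Given a user's startup-related question in English, "
    "you will generate a thoughtful answer in English."
)
logging.set_verbosity(logging.CRITICAL)  # silence generation warnings
# Built once, outside the loop, so it is not re-created for every question.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=512)

while True:
    input_text = input(">>>")
    # Llama-2 chat template: system prompt inside <<SYS>>, user turn inside [INST].
    prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n{input_text} [/INST]"
    result = pipe(prompt)
    # Drop the echoed prompt so only the generated answer is printed.
    print(result[0]["generated_text"].replace(prompt, ""))
```

Building the pipeline once before the loop, rather than on every iteration as in the committed snippet, avoids re-initializing generation state for each question; the model and adapter are loaded only once in either case.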