p1atdev commited on
Commit
c0bee89
1 Parent(s): 01c1073

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -38,14 +38,14 @@ MODEL_NAME = "p1atdev/dart-v1-sft"
38
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True) # trust_remote_code is required for tokenizer
39
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.bfloat16)
40
 
41
- prompt = "<|bos|><rating>rating:sfw, rating:general</rating><copyright>original</copyright><character></character><general>1girl, "
42
  inputs = tokenizer(prompt, return_tensors="pt").input_ids
43
 
44
  with torch.no_grad():
45
  outputs = model.generate(inputs, generation_config=generation_config)
46
 
47
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
48
- # rating:sfw, rating:general, original, 1girl, ahoge, black hair, blue eyes, blush, closed mouth, ear piercing, earrings, jewelry, looking at viewer, mole, mole under eye, piercing, portrait, shirt, short hair, solo, white shirt
49
  ```
50
 
51
  #### Flash attention (optional)
@@ -86,7 +86,7 @@ ort_model = ORTModelForCausalLM.from_pretrained(MODEL_NAME)
86
  # quantized version
87
  # ort_model = ORTModelForCausalLM.from_pretrained(MODEL_NAME, file_name="model_quantized.onnx")
88
 
89
- prompt = "<|bos|><rating>rating:sfw, rating:general</rating><copyright>original</copyright><character></character><general>1girl, "
90
  inputs = tokenizer(prompt, return_tensors="pt").input_ids
91
 
92
  with torch.no_grad():
 
38
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True) # trust_remote_code is required for tokenizer
39
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.bfloat16)
40
 
41
+ prompt = "<|bos|><rating>rating:sfw, rating:general</rating><copyright>original</copyright><character></character><general><|long|>1girl<|input_end|>"
42
  inputs = tokenizer(prompt, return_tensors="pt").input_ids
43
 
44
  with torch.no_grad():
45
  outputs = model.generate(inputs, generation_config=generation_config)
46
 
47
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
48
+ # rating:sfw, rating:general, 1girl, ahoge, braid, closed eyes, collared dress, dress, flower, full body, hair flower, hair ornament, long hair, night, night sky, outdoors, parted lips, pink flower, pink hair, short sleeves, sky, solo, straight hair, sunflower, very long hair, white flower
49
  ```
50
 
51
  #### Flash attention (optional)
 
86
  # quantized version
87
  # ort_model = ORTModelForCausalLM.from_pretrained(MODEL_NAME, file_name="model_quantized.onnx")
88
 
89
+ prompt = "<|bos|><rating>rating:sfw, rating:general</rating><copyright>original</copyright><character></character><general><|long|>1girl<|input_end|>"
90
  inputs = tokenizer(prompt, return_tensors="pt").input_ids
91
 
92
  with torch.no_grad():