Vijayendra committed on
Commit
d6e3348
1 Parent(s): 1b86984

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -45,14 +45,14 @@ input_prompts = [
45
  # Generate responses
46
  generated_responses = {}
47
  for prompt in input_prompts:
48
- inputs = tokenizer(prompt, return_tensors="pt", max_length=400, truncation=True, padding="max_length").to(device)
49
 
50
  model.eval()
51
  with torch.no_grad():
52
  generated_ids = model.generate(
53
  input_ids=inputs['input_ids'],
54
  attention_mask=inputs['attention_mask'],
55
- max_length=40,
56
  num_beams=7,
57
  repetition_penalty=2.5,
58
  length_penalty=2.0,
@@ -66,4 +66,4 @@ for prompt in input_prompts:
66
  # Display the input prompts and the generated responses
67
  for prompt, response in generated_responses.items():
68
  print(f"Prompt: {prompt}")
69
- print(f"Response: {response}\n")
 
45
  # Generate responses
46
  generated_responses = {}
47
  for prompt in input_prompts:
48
+ inputs = tokenizer(prompt, return_tensors="pt", max_length=40, truncation=True, padding="max_length").to(device)
49
 
50
  model.eval()
51
  with torch.no_grad():
52
  generated_ids = model.generate(
53
  input_ids=inputs['input_ids'],
54
  attention_mask=inputs['attention_mask'],
55
+ max_length=100,
56
  num_beams=7,
57
  repetition_penalty=2.5,
58
  length_penalty=2.0,
 
66
  # Display the input prompts and the generated responses
67
  for prompt, response in generated_responses.items():
68
  print(f"Prompt: {prompt}")
69
+ print(f"Response: {response}\n")