datascientist22 committed
Commit 253e08c
1 Parent(s): 3d168b6

Update app.py

Files changed (1)
  1. app.py +14 -11
app.py CHANGED
@@ -1,18 +1,18 @@
 import streamlit as st
-from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load the tokenizer and model
+# Load the tokenizer and model using PyTorch
 tokenizer = AutoTokenizer.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")
-model = AutoModelForCausalLM.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")
-pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+model = AutoModelForCausalLM.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit", torch_dtype=torch.float16).to("cuda" if torch.cuda.is_available() else "cpu")
 
 # App Title
-st.title("Exam Corrector: Automated Grading with LLama 8b Model")
+st.title("Exam Corrector: Automated Grading with LLama 8b Model (PyTorch)")
 
 # Instructions
 st.markdown("""
 ### Instructions:
-- Upload or type both the **Model Answer** and the **Student Answer**.
+- Enter both the **Model Answer** and the **Student Answer**.
 - Click on the **Grade Answer** button to get the grade and explanation.
 """)
 
@@ -52,12 +52,15 @@ with st.expander("Click to View Documentation"):
 if st.button("Grade Answer"):
     # Combine inputs into the required prompt format
     inputs = f"Model Answer: {model_answer}\n\nStudent Answer: {student_answer}\n\nResponse:"
-
-    # Tokenize the inputs
-    input_ids = tokenizer(inputs, return_tensors="pt").input_ids
 
-    # Generate the response using the model
-    outputs = model.generate(input_ids)
+    # Tokenize the inputs using PyTorch tensors
+    input_ids = tokenizer(inputs, return_tensors="pt").input_ids.to(model.device)
+
+    # Generate the response using the model (PyTorch)
+    with torch.no_grad():
+        outputs = model.generate(input_ids, max_length=200)
+
+    # Decode the output
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
     # Display the grade and explanation
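For reference, the grading path after this commit runs roughly as the standalone sketch below. The `model_answer` and `student_answer` values here are hypothetical stand-ins for the app's Streamlit input widgets, which live in a part of app.py not shown in this diff; everything else mirrors the lines added above.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "MohamedMotaz/Examination-llama-8b-4bit"

# Load the tokenizer and model once: fp16 weights, moved to GPU when available
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
model = model.to("cuda" if torch.cuda.is_available() else "cpu")

# Hypothetical stand-ins for the Streamlit text inputs
model_answer = "Photosynthesis converts light energy into chemical energy."
student_answer = "Plants turn sunlight into food."

# Same prompt format the app builds under the Grade Answer button
inputs = f"Model Answer: {model_answer}\n\nStudent Answer: {student_answer}\n\nResponse:"

# Tokenize onto the model's device, then generate without gradient tracking
input_ids = tokenizer(inputs, return_tensors="pt").input_ids.to(model.device)
with torch.no_grad():
    outputs = model.generate(input_ids, max_length=200)

# Decode the full sequence (prompt plus completion) back to text
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```

Two caveats on the new generate call: `max_length=200` bounds the prompt plus the completion, so long answers can crowd out the model's response, and `max_new_tokens` is usually the safer limit; and despite the "4bit" in the repo name, this load path materializes fp16 weights, so true 4-bit loading would typically go through a quantization config (for example, bitsandbytes), which this commit does not add.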