|
import streamlit as st |
|
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
@st.cache_resource
def _load_grader():
    """Load the fine-tuned grading model exactly once per server process.

    Streamlit re-executes this entire script on every user interaction;
    without caching, the 8B-parameter model would be re-downloaded and
    re-instantiated on every rerun. ``st.cache_resource`` memoizes the
    loaded objects across reruns and sessions.

    Returns:
        tuple: ``(tokenizer, model, pipe)`` — the tokenizer, the causal-LM
        model, and a ready-to-use text-generation pipeline built from them.
    """
    tok = AutoTokenizer.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")
    lm = AutoModelForCausalLM.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")
    gen = pipeline("text-generation", model=lm, tokenizer=tok)
    return tok, lm, gen


# Keep the original module-level names so the rest of the script is unchanged.
tokenizer, model, pipe = _load_grader()
|
|
|
|
|
# Page heading for the grading app.
st.title("Exam Corrector: Automated Grading with LLama 8b Model")

# Short usage guide rendered above the two input fields.
_INSTRUCTIONS_MD = """

### Instructions:

- Upload or type both the **Model Answer** and the **Student Answer**.

- Click on the **Grade Answer** button to get the grade and explanation.

"""
st.markdown(_INSTRUCTIONS_MD)

# Reference answer the grader compares against (pre-filled with an example).
model_answer = st.text_area(
    "Model Answer",
    "The process of photosynthesis involves converting light energy into chemical energy.",
)

# The answer to be graded (pre-filled with an example).
student_answer = st.text_area(
    "Student Answer",
    "Photosynthesis is when plants turn light into energy.",
)
|
|
|
|
|
# Full model/app documentation, shown collapsed by default.
_DOCUMENTATION_MD = """

## Exam-Corrector: A Fine-tuned LLama 8b Model



Exam-corrector is a fine-tuned version of the LLama 8b model, specifically adapted to function as a written question corrector. This model grades student answers by comparing them against model answers using predefined instructions.



### Model Description:

The model ensures consistent and fair grading for written answers. Full marks are given to student answers that convey the complete meaning of the model answer, even with different wording.



### Grading Instructions:

- Model Answer is only used as a reference and does not receive marks.

- Full marks are awarded when student answers convey the full meaning of the model answer.

- Partial marks are deducted for incomplete or irrelevant information.



### Input Format:

- **Model Answer**: {model_answer}

- **Student Answer**: {student_answer}



### Output Format:

- **Grade**: {grade}

- **Explanation**: {explanation}



### Training Details:

- Fine-tuned with LoRA (Low-Rank Adaptation).

- Percentage of trainable model parameters: 3.56%.

"""

with st.expander("Click to View Documentation"):
    st.markdown(_DOCUMENTATION_MD)
|
|
|
|
|
if st.button("Grade Answer"):
    # Build the prompt in the "Model Answer / Student Answer / Response"
    # format the fine-tuned model expects.
    inputs = f"Model Answer: {model_answer}\n\nStudent Answer: {student_answer}\n\nResponse:"

    # Tokenize once, keeping the attention mask so generate() receives it
    # explicitly instead of inferring (and warning about) padding.
    encoded = tokenizer(inputs, return_tensors="pt")
    input_ids = encoded.input_ids

    # BUG FIX: generate() previously used its default generation length
    # (~20 tokens total), which truncated the grade and explanation.
    # max_new_tokens allows a full-length response; pad_token_id avoids the
    # "no pad token" warning for LLaMA-style models.
    outputs = model.generate(
        input_ids,
        attention_mask=encoded.attention_mask,
        max_new_tokens=256,
        pad_token_id=tokenizer.eos_token_id,
    )

    # BUG FIX: decoding outputs[0] wholesale echoed the entire prompt back
    # to the user; decode only the newly generated tokens instead.
    generated_tokens = outputs[0][input_ids.shape[-1]:]
    response = tokenizer.decode(generated_tokens, skip_special_tokens=True)

    # Render the model's grade + explanation.
    st.subheader("Grading Results")
    st.write(response)
|
|
|
|
|
# Attribution footer rendered at the bottom of the page.
_FOOTER_MD = """

---

**App created by [Engr. Hamesh Raj](https://www.linkedin.com/in/hamesh-raj)**

"""
st.markdown(_FOOTER_MD)