datascientist22
committed on
Create app.py
app.py
ADDED
@@ -0,0 +1,71 @@
+import streamlit as st
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+# Load the tokenizer and model once and cache them, so Streamlit does not
+# reload the 8b checkpoint on every rerun of the script
+@st.cache_resource
+def load_model():
+    tokenizer = AutoTokenizer.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")
+    model = AutoModelForCausalLM.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")
+    return tokenizer, model
+
+tokenizer, model = load_model()
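+# Note: the checkpoint name suggests 4-bit quantized weights; loading it
+# typically also requires the bitsandbytes and accelerate packages to be
+# installed (an assumption based on the model name, not verified here).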
+
+# App Title
+st.title("Exam Corrector: Automated Grading with LLama 8b Model")
+
+# Instructions
+st.markdown("""
+### Instructions:
+- Upload or type both the **Model Answer** and the **Student Answer**.
+- Click on the **Grade Answer** button to get the grade and explanation.
+""")
+
+# Input fields for Model Answer and Student Answer
+model_answer = st.text_area("Model Answer", "The process of photosynthesis involves converting light energy into chemical energy.")
+student_answer = st.text_area("Student Answer", "Photosynthesis is when plants turn light into energy.")
+
+# Display documentation in the app
+with st.expander("Click to View Documentation"):
+    st.markdown("""
+    ## Exam-Corrector: A Fine-tuned LLama 8b Model
+
+    Exam-corrector is a fine-tuned version of the LLama 8b model, specifically adapted to function as a written question corrector. This model grades student answers by comparing them against model answers using predefined instructions.
+
+    ### Model Description:
+    The model ensures consistent and fair grading for written answers. Full marks are given to student answers that convey the complete meaning of the model answer, even with different wording.
+
+    ### Grading Instructions:
+    - Model Answer is only used as a reference and does not receive marks.
+    - Full marks are awarded when student answers convey the full meaning of the model answer.
+    - Marks are deducted for answers that are incomplete or contain irrelevant information.
+
+    ### Input Format:
+    - **Model Answer**: {model_answer}
+    - **Student Answer**: {student_answer}
+
+    ### Output Format:
+    - **Grade**: {grade}
+    - **Explanation**: {explanation}
+
+    ### Training Details:
+    - Fine-tuned with LoRA (Low-Rank Adaptation).
+    - Percentage of trainable model parameters: 3.56%.
+    """)
+
+# Button to trigger grading
+if st.button("Grade Answer"):
+    # Combine inputs into the required prompt format
+    inputs = f"Model Answer: {model_answer}\n\nStudent Answer: {student_answer}\n\nResponse:"
+
+    # Tokenize the inputs
+    input_ids = tokenizer(inputs, return_tensors="pt").input_ids
+
+    # Generate the response; without max_new_tokens, generate() stops after a
+    # very short default budget
+    outputs = model.generate(input_ids, max_new_tokens=256)
+
+    # Decode only the newly generated tokens so the prompt is not echoed back
+    response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
+
+    # Display the grade and explanation
+    st.subheader("Grading Results")
+    st.write(response)
+
+# Footer and app creator details
+st.markdown("""
+---
+**App created by [Engr. Hamesh Raj](https://www.linkedin.com/in/hamesh-raj)**
+""")
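A minimal standalone sketch of the same grading call outside Streamlit, assuming `transformers` and `torch` are installed (plus, per the note above, likely `bitsandbytes` and `accelerate` for the 4-bit weights). The prompt format mirrors the one the app builds, and the example answers are the app's defaults; the printed grade is whatever the model generates, not a guaranteed output.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the same fine-tuned checkpoint the app uses
tokenizer = AutoTokenizer.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")
model = AutoModelForCausalLM.from_pretrained("MohamedMotaz/Examination-llama-8b-4bit")

# Build the prompt in the same "Model Answer / Student Answer / Response:" format
prompt = (
    "Model Answer: The process of photosynthesis involves converting light "
    "energy into chemical energy.\n\n"
    "Student Answer: Photosynthesis is when plants turn light into energy.\n\n"
    "Response:"
)

input_ids = tokenizer(prompt, return_tensors="pt").input_ids
outputs = model.generate(input_ids, max_new_tokens=256)

# Decode only the tokens generated after the prompt
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

To run the app itself, save the file as app.py and launch it with `streamlit run app.py`.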