import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Load Math Arabic LLaMA model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Jr23xd23/Math_Arabic_Llama-3.2-3B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Jr23xd23/Math_Arabic_Llama-3.2-3B-Instruct")
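# Optional improvement (sketch): Streamlit reruns this whole script on every widget
# interaction, so the 3B model above is reloaded each time as written. Wrapping the
# load in a function decorated with st.cache_resource keeps the model and tokenizer
# in memory across reruns, e.g.:
#
#   @st.cache_resource
#   def load_model():
#       tok = AutoTokenizer.from_pretrained("Jr23xd23/Math_Arabic_Llama-3.2-3B-Instruct")
#       mod = AutoModelForCausalLM.from_pretrained("Jr23xd23/Math_Arabic_Llama-3.2-3B-Instruct")
#       return tok, mod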
# Set up the Hugging Face pipeline for text-generation task with LLaMA model
model_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=-1,  # Ensure it runs on CPU (use device=0 for the first GPU)
)
# Streamlit app UI
st.title("Math Tutor")
st.write("Ask me a math question in Arabic, and I will help you.")
# Sidebar for user to control model generation parameters
st.sidebar.title("Model Parameters")
temperature = st.sidebar.slider("Temperature", 0.1, 1.5, 1.0, 0.1) # Default 1.0
top_p = st.sidebar.slider("Top-p (Nucleus Sampling)", 0.0, 1.0, 0.9, 0.05) # Default 0.9
top_k = st.sidebar.slider("Top-k", 0, 100, 50, 1) # Default 50
do_sample = st.sidebar.checkbox("Enable Random Sampling", value=True) # Enable sampling
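# Note: temperature, top-p, and top-k only influence generation when random sampling
# is enabled; with "Enable Random Sampling" unchecked the model decodes greedily.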
# Input field for the student
student_question = st.text_input("Ask your question in English or Arabic!")
# Generate and display response using the LLaMA model
if student_question:
    # Adjust prompt to encourage student-friendly responses
    prompt = f"Please explain the answer step by step in simple terms to a young student: '{student_question}'"
    # Call the pipeline with the parameters chosen in the sidebar
    response = model_pipeline(
        prompt,
        max_new_tokens=150,        # Length of the generated answer (excludes the prompt)
        temperature=temperature,   # Control randomness
        top_p=top_p,               # Nucleus sampling
        top_k=top_k,               # Top-k sampling
        do_sample=do_sample,       # Enable or disable sampling
        return_full_text=False,    # Show only the generated answer, not the echoed prompt
    )
    st.write("Tutor's Answer:", response[0]['generated_text'])
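# To try the app locally (assumes streamlit and transformers are installed):
#   streamlit run app.py
# The ~3B-parameter model is downloaded on first use, and generation on CPU can be slow.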