import re

import gradio as gr
import pandas as pd
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the dataset (placeholder path; point this at the real CSV)
df = pd.read_csv("subdirectory_name/climate_data.csv")
# Load the LLaMA model and tokenizer
model_name = "huggingface/llama"  # Placeholder; replace with an actual LLaMA checkpoint ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
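# Optional sketch (assumption, not part of the original app): full-size LLaMA
# checkpoints are large, so a common pattern is half-precision loading with
# automatic device placement (requires the `accelerate` package):
# model = AutoModelForCausalLM.from_pretrained(
#     model_name, torch_dtype="auto", device_map="auto"
# )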
# Function to get model responses
def ask_llama(question):
    inputs = tokenizer(question, return_tensors="pt")
    # Pass the attention mask along with the input IDs and cap new tokens
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
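# Note: decode() returns the prompt plus the model's continuation, so the
# keyword lookup below sees the user's original wording as well.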
# Function to fetch data from the dataset
def fetch_data_from_dataset(query, df):
    if "year" in query:
        # int() on the raw tail would crash on anything but a bare number,
        # so pull the first 4-digit figure that follows the word "year"
        match = re.search(r"year\D*(\d{4})", query)
        if match:
            year = int(match.group(1))
            return df[df['year'] == year].to_dict(orient="records")
    if "scenario" in query:
        scenario = query.split("scenario")[-1].strip()
        columns = [col for col in df.columns if scenario in col]
        if columns:
            return df[columns].to_dict(orient="records")
    return "Sorry, I couldn't find any relevant data."
# Combined function to answer user questions
def answer_question(query):
    # Step 1: Get a free-text response from the LLaMA model
    llama_response = ask_llama(query)
    # Step 2: Fetch data based on keywords in that response
    data_response = fetch_data_from_dataset(llama_response, df)
    # Convert to a string so a list of records renders cleanly in the textbox
    return str(data_response)
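# Design note: the keyword lookup runs on the model's free-text answer rather
# than the raw user query; routing the query straight into
# fetch_data_from_dataset would give more predictable matching at the cost of
# the model's paraphrasing.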
# Define the Gradio interface
interface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
    title="Climate Data Explorer",
    description="Ask questions about climate data",
)
# Launch the app
interface.launch()
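# launch(share=True) would expose a temporary public URL when running outside
# a hosted Space.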