# fact-check / app.py
# (Hugging Face Space file header — scrape residue commented out so the file parses.
#  Original metadata: uploaded by Shreemit, commit deac359 verified, 2.7 kB.)
# Standard library
import os
from getpass import getpass
from typing import List, Tuple

# Third-party
import streamlit as st
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import PromptTemplate
from langchain.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAI
# --- Search tool and LLM setup --------------------------------------------
# SECURITY: the original code hard-coded the Tavily and Google API keys in
# source (both are now compromised and must be rotated). Keys are read from
# the environment instead; on Hugging Face Spaces / Streamlit, configure
# them as secrets, which are exposed through os.environ.
search = TavilySearchAPIWrapper(tavily_api_key=os.environ["TAVILY_API_KEY"])

# Tool description the agent uses when deciding whether to invoke the tool.
# (Original had a quadruple-quote typo, embedding a stray '"' in the text.)
description = """A search engine optimized for comprehensive, accurate, \
and trusted results. Useful for when you need to answer questions \
about current events or about recent information. \
Input should be a search query. \
If the user is asking about something that you don't know about, \
you should probably use this tool to see if that can provide any information."""

tavily_tool = TavilySearchResults(api_wrapper=search, description=description)
tools = [tavily_tool]

# Gemini completion model used both directly and inside the agent below.
llm = GoogleGenerativeAI(model="gemini-pro", google_api_key=os.environ["GOOGLE_API_KEY"])
# NOTE(review): dead code. This `prompt` is never passed to any chain and is
# shadowed by the reassignments near the bottom of the file; `llm_with_tools`
# is never referenced again. Also, `bind(functions=...)` attaches OpenAI-style
# function schemas to a Gemini *completion* model — presumably intended for an
# OpenAI-functions agent that was never wired up; confirm before relying on
# tool-calling through this object. Safe to delete once confirmed unused.
prompt = ChatPromptTemplate.from_messages(
    [
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)
llm_with_tools = llm.bind(functions=tools)
# --- Streamlit UI ---------------------------------------------------------
st.title('Fact-Checking Chatbot')

# Default system instructions for the fact-checking agent.
DEFAULT_INSTRUCTIONS = """
You are a fact-checker. You are asked to verify the following statement based on the information you get from your tool
and your knowledge. You should provide a response that is based on the information you have and that is as accurate as possible.
Your response should be True or False. If you are not sure, you should say that you are not sure.
"""

# BUG FIX: the original called st.text_area(prompt), which used the
# instructions as the widget *label* and sent the (default-empty) area
# content to the agent — so the agent never saw the instructions. Here the
# instructions are the editable *value* of the text area instead.
instructions = st.text_area("Fact-checking instructions", value=DEFAULT_INSTRUCTIONS)

# Statement the user wants fact-checked.
user_input = st.text_input("Enter your question")

if user_input:
    # ReAct-style agent equipped with the Tavily search tool.
    # NOTE: rebuilt on every Streamlit rerun (cheap, but could be cached
    # with @st.cache_resource if startup cost ever matters).
    agent_chain = initialize_agent(
        [tavily_tool],
        llm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )
    # Single agent call combining instructions and the user's statement.
    # (The original also rendered a bare llm.invoke(user_input) answer first,
    # which showed a second, un-fact-checked response — removed.)
    answer = agent_chain.invoke(
        instructions + "\n " + user_input,
    )
    st.write(answer)