import pickle

import gradio as gr

from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.llms import OpenAI
# The pickled search index loaded below is a FAISS vector store built with OpenAI embeddings.
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS

# Load the prebuilt FAISS search index over the xrp-generative-art documents.
with open("search_index.pickle", "rb") as f:
    search_index = pickle.load(f)
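
# Note: a rough, hypothetical sketch of how a "search_index.pickle" like the one
# loaded above could be built; the build step is not part of this app. It splits the
# repo's documents into chunks, embeds them with OpenAIEmbeddings, indexes them in
# FAISS, and pickles the store. The name `raw_documents` (a list of (text, source_url)
# pairs) and the chunking parameters are assumptions for illustration only.
#
#     from langchain.text_splitter import CharacterTextSplitter
#
#     splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
#     texts, metadatas = [], []
#     for text, url in raw_documents:
#         for chunk in splitter.split_text(text):
#             texts.append(chunk)
#             metadatas.append({"source": url})
#     index = FAISS.from_texts(texts, OpenAIEmbeddings(), metadatas=metadatas)
#     with open("search_index.pickle", "wb") as f:
#         pickle.dump(index, f)
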
def print_answer(question, openai):
    """Answer a question over the indexed documents and append clickable source links."""
    chain = load_qa_with_sources_chain(openai)
    response = chain(
        {
            "input_documents": search_index.similarity_search(question, k=4),
            "question": question,
        },
        return_only_outputs=True,
    )["output_text"]

    # The chain returns the answer followed by a final "SOURCES: <url> [<url> ...]" line.
    answer = response.split("\n")[0]
    sources = response.split("\n")[-1].split()
    if len(sources) > 2:
        # Several source URLs: render each one (skipping the "SOURCES:" token) as a numbered link.
        links = ", ".join(
            f' <a href="{sources[i]}" target="_blank"><u>Click Link{i}</u></a>'
            for i in range(1, len(sources))
        )
        return answer + links
    # A single source URL: the last token on the line is the link target.
    return answer + f' <a href="{sources[-1]}" target="_blank"><u>Click Link</u></a>'

def chat(message, history, openai_api_key):
    """Gradio callback: answer the latest message and append it to the chat history."""
    openai = OpenAI(temperature=0, openai_api_key=openai_api_key)
    history = history or []
    message = message.lower()
    response = print_answer(message, openai)
    history.append((message, response))
    return history, history

with gr.Blocks() as demo:
    gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
        <div
            style="
                display: inline-flex;
                align-items: center;
                gap: 0.8rem;
                font-size: 1.75rem;
            "
        >
            <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
                xrp-generative-art Q&amp;A - LangChain Bot
            </h1>
        </div>
        <p style="margin-bottom: 10px; font-size: 94%">
            Hi, I'm a Q&amp;A bot for xrp-generative-art. Start by typing in your OpenAI API key, then enter the questions or issues you are facing in your xrp-generative-art implementation and press Enter.<br>
            <a href="https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> Duplicate the Space with a GPU upgrade for fast inference and no queue<br>
            Built using <a href="https://langchain.readthedocs.io/en/latest/" target="_blank">LangChain</a> and <a href="https://github.com/gradio-app/gradio" target="_blank">Gradio</a> for the xrp-generative-art repo
        </p>
    </div>""")
    with gr.Row():
        question = gr.Textbox(label='Type in your questions about xrp-generative-art here and press Enter!', placeholder='What questions do you want to ask about the xrp-generative-art library?')
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
    state = gr.State()
    chatbot = gr.Chatbot()
    question.submit(chat, [question, state, openai_api_key], [chatbot, state])

if __name__ == "__main__":
    demo.launch()