Abid Ali Awan committed on
Commit
c84dd0f
·
1 Parent(s): 00e0504

trigger_mode="always_last" added

Browse files
Files changed (1) hide show
  1. app.py +9 -34
app.py CHANGED
@@ -14,9 +14,8 @@ groq_api_key = os.getenv("Groq_API_Key")
14
  llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_api_key)
15
 
16
  # Initialize the embedding model
17
- embed_model = HuggingFaceEmbeddings(
18
- model_name="mixedbread-ai/mxbai-embed-large-v1", model_kwargs={"device": "cpu"}
19
- )
20
 
21
  # Load the vector store from a local directory
22
  vectorstore = Chroma(
@@ -49,28 +48,13 @@ rag_chain = (
49
  | StrOutputParser()
50
  )
51
 
52
-
53
  # Define the function to stream the RAG memory
54
- def rag_memory_stream(text, change_tracker):
55
- if change_tracker.get("changed", False):
56
- return # Stop the generation if input has changed
57
-
58
  partial_text = ""
59
  for new_text in rag_chain.stream(text):
60
- if change_tracker.get("changed", False):
61
- return # Stop the generation if input has changed
62
  partial_text += new_text
63
- yield partial_text # Yield the updated conversation history
64
-
65
-
66
- def input_listener(text, change_tracker):
67
- change_tracker["changed"] = True
68
- change_tracker["changed"] = False
69
- return text
70
-
71
-
72
- # Initialize a change tracker
73
- change_tracker = {"changed": False}
74
 
75
  # Set up the Gradio interface
76
  title = "Real-time AI App with Groq API and LangChain"
@@ -80,21 +64,11 @@ description = """
80
  </center>
81
  """
82
 
83
- # Define input components with event listeners
84
- text_input = gr.Textbox(label="Enter your question", elem_id="question")
85
- text_input.change(
86
- fn=input_listener,
87
- inputs=[text_input],
88
- outputs=[text_input],
89
- change_tracker=change_tracker,
90
- )
91
-
92
- # Create the Gradio interface
93
  demo = gr.Interface(
94
  title=title,
95
  description=description,
96
- fn=lambda text: rag_memory_stream(text, change_tracker),
97
- inputs=text_input,
98
  outputs="text",
99
  live=True,
100
  batch=True,
@@ -102,8 +76,9 @@ demo = gr.Interface(
102
  concurrency_limit=12,
103
  allow_flagging="never",
104
  theme=gr.themes.Soft(),
 
105
  )
106
 
107
  # Launch the Gradio interface
108
  demo.queue()
109
- demo.launch()
 
14
  llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_api_key)
15
 
16
  # Initialize the embedding model
17
+ embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1",
18
+ model_kwargs = {'device': 'cpu'})
 
19
 
20
  # Load the vector store from a local directory
21
  vectorstore = Chroma(
 
48
  | StrOutputParser()
49
  )
50
 
 
51
  # Define the function to stream the RAG memory
52
+ def rag_memory_stream(text):
 
 
 
53
  partial_text = ""
54
  for new_text in rag_chain.stream(text):
 
 
55
  partial_text += new_text
56
+ # Yield the updated conversation history
57
+ yield partial_text
 
 
 
 
 
 
 
 
 
58
 
59
  # Set up the Gradio interface
60
  title = "Real-time AI App with Groq API and LangChain"
 
64
  </center>
65
  """
66
 
 
 
 
 
 
 
 
 
 
 
67
  demo = gr.Interface(
68
  title=title,
69
  description=description,
70
+ fn=rag_memory_stream,
71
+ inputs="text",
72
  outputs="text",
73
  live=True,
74
  batch=True,
 
76
  concurrency_limit=12,
77
  allow_flagging="never",
78
  theme=gr.themes.Soft(),
79
+ trigger_mode="always_last",
80
  )
81
 
82
  # Launch the Gradio interface
83
  demo.queue()
84
+ demo.launch()