Upload app.py
app.py CHANGED
@@ -21,11 +21,8 @@ os.environ['OPENAI_API_KEY'] = "gl-U2FsdGVkX1+0bNWD6YsVLZUYsn0m1WfLxUzrP0xUFbtW
 os.environ["OPENAI_BASE_URL"] = "https://aibe.mygreatlearning.com/openai/v1" # e.g. "https://aibe.mygreatlearning.com/openai/v1";
 
 model_name = 'gpt-4o-mini' # e.g. 'gpt-3.5-turbo'
-
-# Initialize the ChatOpenAI model
+
 llm = ChatOpenAI(model_name=model_name, temperature=0) # Set temperature to 0 for deterministic output
-# Create a HumanMessage
-#user_message = HumanMessage(content="What's the weather like today?")
 
 # Define the embedding model and the vectorstore
 embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')
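
The context cuts off right after the embedding model, so the vectorstore that the comment announces is not visible in this hunk. A minimal sketch of the usual next step, assuming a Chroma collection pre-built from the 10-K chunks (the collection name and directory are placeholders, not taken from this repo):

from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma

embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')

# Hypothetical: open an existing on-disk collection of 10-K chunks
vectorstore = Chroma(
    collection_name='10k-reports',    # placeholder
    persist_directory='./chroma_db',  # placeholder
    embedding_function=embedding_model,
)
retriever = vectorstore.as_retriever(search_kwargs={'k': 5})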
@@ -121,13 +118,6 @@ def llm_query(user_input,company):
         # Call the chat model with the message
         response = llm(prompt)
 
-        # response = llm_client.chat.completions.create(
-        #     model=model_name,
-        #     messages=prompt,
-        #     temperature=0
-        # )
-
-        #llm_response = response.choices[0].message.content.strip()
         llm_response = response.content.strip()
 
     except Exception as e:
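
A note on the surviving call: `response = llm(prompt)` uses the older LangChain `__call__` style, which newer releases deprecate in favor of `.invoke()`. A minimal sketch of the equivalent call, assuming a recent langchain_openai install (not necessarily what this commit pins):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model_name=model_name, temperature=0)

# .invoke() accepts a plain string or a list of messages and returns an
# AIMessage, so the existing .content.strip() line works unchanged
response = llm.invoke(prompt)
llm_response = response.content.strip()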
@@ -137,9 +127,7 @@ def llm_query(user_input,company):
     print(llm_response)
 
     # While the prediction is made, log both the inputs and outputs to a local log file
-    # While writing to the log file, ensure that the commit scheduler is locked to avoid parallel
-    # access
-
+    # While writing to the log file, ensure that the commit scheduler is locked to avoid parallel access
     with scheduler.lock:
         with log_file.open("a") as f:
             f.write(json.dumps(
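
`scheduler` and `log_file` are defined earlier in app.py and are not shown in this diff. The `scheduler.lock` pattern matches huggingface_hub's CommitScheduler, which uploads the log folder to a Hub dataset repo on a timer; a sketch under that assumption, with the repo id and paths as placeholders:

from pathlib import Path
from huggingface_hub import CommitScheduler

log_folder = Path('logs')               # placeholder folder name
log_folder.mkdir(parents=True, exist_ok=True)
log_file = log_folder / 'queries.json'  # placeholder file name

# Commits the folder contents to a dataset repo every `every` minutes;
# holding scheduler.lock while writing keeps an upload from capturing a
# half-written line
scheduler = CommitScheduler(
    repo_id='your-username/llm-query-logs',  # placeholder repo
    repo_type='dataset',
    folder_path=log_folder,
    path_in_repo='data',
    every=10,
)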
@@ -159,7 +147,7 @@ company = gr.Radio(label='Company:', choices=["aws", "google", "IBM", "Meta", "m
 
 # Create Gradio interface
 # For the inputs parameter of Interface provide [textbox,company] with outputs parameter of Interface provide prediction
-demo = gr.Interface(fn=llm_query, inputs=[textbox, company], outputs="text", title="Financial Analyst Assistant", description="Ask questions about the financial performance of AWS, Google, IBM, Meta, and Microsoft based on their 10-K reports.\n\nPlease enter a question below.", theme=gr.themes.Soft())
+demo = gr.Interface(fn=llm_query, inputs=[textbox, company], outputs="text", title="FY23 Financial Analyst Assistant", description="Ask questions about the financial performance of AWS, Google, IBM, Meta, and Microsoft based on their 10-K reports.\n\nPlease enter a question below.", theme=gr.themes.Soft())
 
 demo.queue()
-demo.launch()
+demo.launch()
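
Of the two Interface inputs, only `company` is visible (in the hunk header above); `textbox` is defined off-screen. A plausible definition, with the label and placeholder text assumed:

import gradio as gr

# Hypothetical: the free-text question input wired in as `textbox`
textbox = gr.Textbox(label='Question:', placeholder='Ask about the financial performance reported in a 10-K')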