Update app.py
--- a/app.py
+++ b/app.py
@@ -9,7 +9,7 @@ from langchain_ollama.llms import OllamaLLM
 from langchain.chains import LLMChain
 import os
 from huggingface_hub import login
-
+from langchain_groq import ChatGroq
 
 
 @st.cache_resource
@@ -33,10 +33,12 @@ def load_model():
     ])
 
 
-    device = "cuda" if torch.cuda.is_available() else "cpu"
+    #device = "cuda" if torch.cuda.is_available() else "cpu"
 
-    llm = OllamaLLM(model="gemma2", temperature=0, device=device)
+    #llm = OllamaLLM(model="gemma2", temperature=0, device=device)
+    api_key = "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"
 
+    llm = ChatGroq(model = "llama-3.1-70b-versatile", temperature = 0,api_key = api_key)
     chain = prompt|llm
     return chain
 
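For reference, here is a minimal sketch of what load_model() looks like after this commit. It assumes a simple placeholder ChatPromptTemplate (the real template is truncated in the hunk above) and, unlike the commit, reads the Groq key from a GROQ_API_KEY environment variable, since hardcoding a gsk_ key in app.py leaves it exposed in the repo history:

import os

import streamlit as st
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

@st.cache_resource
def load_model():
    # Placeholder prompt; the actual template is truncated in the diff above.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful assistant."),
        ("human", "{question}"),
    ])

    # ChatGroq replaces the commented-out local OllamaLLM, so inference now
    # runs on Groq's hosted API and no CUDA device selection is needed.
    llm = ChatGroq(
        model="llama-3.1-70b-versatile",
        temperature=0,
        api_key=os.environ["GROQ_API_KEY"],  # assumption: key supplied via env var
    )

    return prompt | llm

Note that ChatGroq is a chat model: load_model().invoke({"question": "..."}) returns an AIMessage, whereas the previous OllamaLLM chain returned a plain string, so downstream code that displayed the output directly may now need .content.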