m96tkmok committed
Commit 6fa11a7 · verified · 1 Parent(s): 505efc3

Update app.py

Use the embedding model and LLM from Hugging Face

Files changed (1)
  1. app.py +5 -2
app.py CHANGED
@@ -103,7 +103,9 @@ def main() -> None:
     #chunks = text_splitter.split_documents(docs)
     chunks = text_splitter.split_documents(raw_text)
 
-    embeddings = OllamaEmbeddings(model='nomic-embed-text', base_url="http://localhost:11434")
+    ## Mod from nomic-embed-text to nomic-ai/nomic-embed-text-v1.5
+    #embeddings = OllamaEmbeddings(model='nomic-ai/nomic-embed-text-v1.5', base_url="http://localhost:11434")
+    embeddings = OllamaEmbeddings(model='nomic-ai/nomic-embed-text-v1.5')
 
     single_vector = embeddings.embed_query("this is some text data")
 
@@ -136,7 +138,8 @@ def main() -> None:
 
     prompt = ChatPromptTemplate.from_template(prompt)
 
-    model = ChatOllama(model="llama3.2:latest")
+    ## from llama3.2:latest to unsloth/Llama-3.2-3B
+    model = ChatOllama(model="unsloth/Llama-3.2-3B")
 
     rag_chain = (
         {"context": retriever|format_docs, "question": RunnablePassthrough()}