rag / model_loader.py
user
modifications for remote development using huggingface resources
b5553ae
raw
history blame
682 Bytes
from transformers import AutoTokenizer, AutoModel, pipeline
from huggingface_hub import hf_hub_download
def load_model(model_name: str = "sentence-transformers/all-MiniLM-L6-v2"):
    """Load a tokenizer and embedding model for sentence encoding.

    Args:
        model_name: Hugging Face model id to load. Defaults to the
            all-MiniLM-L6-v2 sentence-transformer used originally, so
            existing callers are unaffected.

    Returns:
        A ``(tokenizer, model)`` tuple; the model is mapped to CPU.
    """
    # Single parameter instead of the model id repeated twice — keeps
    # tokenizer and model guaranteed to come from the same checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name, device_map="cpu")
    return tokenizer, model
def load_generator(model_name: str = "google/flan-t5-base"):
    """Build a text2text-generation pipeline on CPU.

    Args:
        model_name: Hugging Face model id for the generator. Defaults to
            ``google/flan-t5-base`` (the original hard-coded choice), so
            existing callers are unaffected.

    Returns:
        A ``transformers`` pipeline configured for text2text-generation.
    """
    return pipeline('text2text-generation', model=model_name, device_map="cpu")
def download_pdf(repo_id: str = "your_username/your_repo_name",
                 filename: str = "your_pdf_file.pdf") -> str:
    """Download a PDF from a Hugging Face Hub repository.

    Args:
        repo_id: Hub repository id (``user/repo``). Defaults to the original
            placeholder — replace it with your actual repository.
        filename: Name of the PDF file inside the repository. Defaults to the
            original placeholder.

    Returns:
        Local filesystem path of the downloaded (and cached) file.
    """
    # hf_hub_download caches the file locally and returns its path; repeat
    # calls are served from the cache rather than re-downloading.
    file_path = hf_hub_download(repo_id=repo_id, filename=filename)
    return file_path