rag/model_loader.py
from transformers import AutoTokenizer, AutoModel, pipeline
def load_model():
    # Tokenizer and encoder for sentence embeddings (MiniLM, 384-dim vectors).
    tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
    model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
    return tokenizer, model


def load_generator():
    # Seq2seq generation pipeline (FLAN-T5 base) for producing answers.
    return pipeline("text2text-generation", model="google/flan-t5-base")
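A minimal usage sketch, not part of the original file: it assumes the embeddings are obtained by mean-pooling the encoder's token outputs (a common choice for sentence-transformers models) and that the generator is called directly on a prompt string; the question text and generation parameters are illustrative.

import torch

tokenizer, model = load_model()
generator = load_generator()

# Embed a query: tokenize, run the encoder, then mean-pool token vectors
# while masking out padding positions.
inputs = tokenizer("What is retrieval-augmented generation?",
                   return_tensors="pt", truncation=True)
with torch.no_grad():
    outputs = model(**inputs)
mask = inputs["attention_mask"].unsqueeze(-1).float()
embedding = (outputs.last_hidden_state * mask).sum(1) / mask.sum(1)

# Generate an answer with the FLAN-T5 pipeline.
answer = generator("Answer the question: What is retrieval-augmented generation?",
                   max_new_tokens=64)[0]["generated_text"]
print(embedding.shape, answer)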