import torch
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Model and tokenizer are loaded lazily at startup (see init_model at the bottom).
MODEL = None
TOKENIZER = None
# Allow the Netlify frontend to call this API. A CORS origin must match the
# browser's Origin header exactly, which never includes a trailing slash, so
# 'https://aiforall.netlify.app/' would never match.
origins = ['https://aiforall.netlify.app']

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
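
# A minimal way to serve this app (a sketch, assuming this file is named
# app.py and the usual Hugging Face Spaces port 7860):
#   uvicorn app:app --host 0.0.0.0 --port 7860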
# Example request: /?input=%22Name%203%20shows%22 (URL-encoded '"Name 3 shows"').
# The "/" path is an assumption: the original had no route decorator, so this
# handler was unreachable.
@app.get("/")
def llama(input: str = ""):
    # Generation is disabled until init_model below is re-enabled:
    # prompt = [{'role': 'user', 'content': input}]
    # inputs = TOKENIZER.apply_chat_template(prompt, add_generation_prompt=True, return_tensors='pt')
    # tokens = MODEL.generate(inputs.to(MODEL.device), max_new_tokens=1024, temperature=0.3, do_sample=True)
    # tresponse = TOKENIZER.decode(tokens[0], skip_special_tokens=False)
    # print(tresponse)
    return Response(content="hello world", media_type="application/json")  # placeholder body
# Startup hook that loads the model once per process; commented out so the
# Space can boot without downloading weights. (@app.on_event("startup") is
# deprecated in recent FastAPI releases in favor of lifespan handlers.)
# @app.on_event("startup")
# def init_model():
#     global MODEL
#     global TOKENIZER
#     if not MODEL:
#         print("loading model")
#         TOKENIZER = AutoTokenizer.from_pretrained('stabilityai/stablelm-zephyr-3b')
#         MODEL = AutoModelForCausalLM.from_pretrained('stabilityai/stablelm-zephyr-3b', device_map="auto")
#         print("loaded model")