# SabziAi / app.py
# apaxray — Update app.py
# b7b63ae verified · raw · history · blame · 1.61 kB
# (Hugging Face Space file-viewer header preserved as comments so the script parses.)
from transformers import AutoTokenizer, AutoModelForCausalLM
import os
# Read the Hugging Face access token from the environment (set HF_TOKEN in
# the Space secrets); None is fine for public, non-gated repos.
token = os.getenv('HF_TOKEN')

# Hub repo ids for the available models.
# The Falcon id was "huggingface/falcon-7b-instruct", which does not exist on
# the Hub; the official repo is "tiiuae/falcon-7b-instruct".
MODEL_PATHS = {
    'falcon': "tiiuae/falcon-7b-instruct",
    'gpt_neox': "EleutherAI/gpt-neox-20b",
    # NOTE(review): this is a BERT encoder, not a causal LM — loading it with
    # AutoModelForCausalLM below will fail or warn; confirm the intended model.
    'persian': "HooshvareLab/bert-fa-zwnj-base",
    # NOTE(review): this repo id does not appear to exist on the Hub — replace
    # with a real math-tuned model before use.
    'math': "EleutherAI/gpt-neox-20b-math",
}

# Load every tokenizer and model once at startup. `token=` (the replacement
# for the deprecated `use_auth_token=`) is passed so gated repos authenticate.
tokenizers = {name: AutoTokenizer.from_pretrained(path, token=token)
              for name, path in MODEL_PATHS.items()}
models = {name: AutoModelForCausalLM.from_pretrained(path, token=token)
          for name, path in MODEL_PATHS.items()}
def generate_response(prompt, model_name="falcon"):
    """Generate a text completion for *prompt* with the chosen model.

    Args:
        prompt: Input text fed to the language model.
        model_name: Key into the module-level ``tokenizers``/``models``
            dicts (e.g. ``"falcon"``, ``"gpt_neox"``, ``"persian"``).

    Returns:
        The decoded model output as a string, special tokens stripped.

    Raises:
        KeyError: If *model_name* was not loaded at startup.
    """
    tokenizer = tokenizers[model_name]
    model = models[model_name]
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pass attention_mask along with input_ids (via **inputs) so generate()
    # does not guess the mask and mis-handle padding; max_length=100 caps
    # prompt + generated tokens combined.
    outputs = model.generate(**inputs, max_length=100, num_return_sequences=1)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Example usage: send the same Persian greeting to three of the loaded models.
prompt = "سلام، امروز چه خبر؟"

response_falcon = generate_response(prompt, model_name="falcon")
response_gpt_neox = generate_response(prompt, model_name="gpt_neox")
response_persian = generate_response(prompt, model_name="persian")

# Print each reply under its label.
for label, reply in (("Falcon Response:", response_falcon),
                     ("GPT-NeoX Response:", response_gpt_neox),
                     ("Persian Response:", response_persian)):
    print(label, reply)