apaxray committed on
Commit 1cb7165 · verified · 1 Parent(s): 26dccfd

Update app.py

Files changed (1)
  1. app.py +30 -50

app.py CHANGED
@@ -1,53 +1,33 @@
  from transformers import AutoTokenizer, AutoModelForCausalLM
- from sympy import solve, symbols
- from fastapi import FastAPI
- import uvicorn

-
-
-
-
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
-
-
-
- # The different models
- MODEL_GENERAL = "meta-llama/Llama-2-7b-chat-hf"
- MODEL_IRAN = "HooshvareLab/bert-fa-base-uncased"
- MODEL_MATH = None  # SymPy handles the math mode
-
- # Load the models
- tokenizer_general = AutoTokenizer.from_pretrained(MODEL_GENERAL)
- model_general = AutoModelForCausalLM.from_pretrained(MODEL_GENERAL)
-
- tokenizer_iran = AutoTokenizer.from_pretrained(MODEL_IRAN)
- model_iran = AutoModelForCausalLM.from_pretrained(MODEL_IRAN)
-
- # FastAPI for request handling
- app = FastAPI()
-
- def generate_response(model, tokenizer, prompt, max_tokens=100):
+ # The different AI models
+ models = {
+     'falcon': "tiiuae/falcon-7b-instruct",  # Falcon checkpoints live under the tiiuae org, not "huggingface"
+     'gpt_neox': "EleutherAI/gpt-neox-20b",
+     'persian': "HooshvareLab/bert-fa-zwnj-base",  # encoder-only BERT; see the note below the diff
+     'math': "EleutherAI/gpt-neox-20b-math"  # math model (must be created, or use a similar model)
+ }
+
+ # Load the models from Hugging Face
+ tokenizers = {name: AutoTokenizer.from_pretrained(path) for name, path in models.items()}
+ models = {name: AutoModelForCausalLM.from_pretrained(path) for name, path in models.items()}  # rebinds `models` from repo IDs to loaded models (the right-hand side is evaluated first)
+
+ def generate_response(prompt, model_name="falcon"):
+     tokenizer = tokenizers[model_name]
+     model = models[model_name]
+
      inputs = tokenizer(prompt, return_tensors="pt")
-     outputs = model.generate(inputs.input_ids, max_new_tokens=max_tokens)
-     return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- @app.post("/chat")
- def chat(input_text: str, mode: str = "general"):
-     if mode == "general":
-         response = generate_response(model_general, tokenizer_general, input_text)
-     elif mode == "iran":
-         response = generate_response(model_iran, tokenizer_iran, input_text)
-     elif mode == "math":
-         x = symbols("x")
-         try:
-             solution = solve(input_text, x)
-             response = f"Solution: {solution}"
-         except Exception as e:
-             response = f"Math error: {str(e)}"
-     else:
-         response = "Invalid mode selected."
-     return {"response": response}
-
- if __name__ == "__main__":
-     uvicorn.run(app, host="0.0.0.0", port=8000)
+     outputs = model.generate(inputs["input_ids"], max_length=100, num_return_sequences=1)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     return response
+
+ # Example usage
+ prompt = "سلام، امروز چه خبر؟"  # "Hello, what's the news today?"
+ response_falcon = generate_response(prompt, model_name="falcon")
+ response_gpt_neox = generate_response(prompt, model_name="gpt_neox")
+ response_persian = generate_response(prompt, model_name="persian")
+
+ print("Falcon Response:", response_falcon)
+ print("GPT-NeoX Response:", response_gpt_neox)
+ print("Persian Response:", response_persian)