DevsDoCode committed on
Commit 9d73e86 · verified · 1 Parent(s): d0b9b99

Upload 5 files

Files changed (5)
  1. DockerFile +11 -0
  2. api_info.py +93 -0
  3. app.py +42 -0
  4. openrouter.py +86 -0
  5. requirements.txt +5 -0
DockerFile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.10
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ COPY . .
+
+ CMD ["flask", "run", "--host", "0.0.0.0", "--port", "7860"]
api_info.py ADDED
@@ -0,0 +1,93 @@
+ developer_info = {
+     'developer': 'Devs Do Code',
+     'contact': {
+         'Telegram': 'https://t.me/devsdocode',
+         'YouTube Channel': 'https://www.youtube.com/@DevsDoCode',
+         'LinkedIn': 'https://www.linkedin.com/in/developer-sreejan/',
+         'Discord Server': 'https://discord.gg/ehwfVtsAts',
+         'Instagram': {
+             'Personal': 'https://www.instagram.com/sree.shades_/',
+             'Channel': 'https://www.instagram.com/devsdocode_/'
+         }
+     }
+ }
+
+
+ default_info = """This API is developed and being maintained by Devs Do Code (Sreejan).
+
+ **About the Developer**
+
+ Sreejan, a high school student from Patna, Bihar, India, has emerged as a notable figure in the technology sector.
+ His creation of an API is a testament to his dedication and expertise. Despite his youth, Sreejan's contributions
+ to artificial intelligence and machine learning are significant. As an AI & ML Engineer, he specializes in Deep Learning,
+ Natural Language Processing (NLP), and Robotics, with proficiency in Python, Java, and Mobile App Development.
+ Beyond his role as a technology consumer, Sreejan is an active open-source contributor, notably to projects like Hugging Face.
+
+ He is also recognized for his role in community development, particularly through "Devs Do Code," a platform he
+ founded to provide quality coding resources, tutorials, and projects. His mission is to equip developers with the
+ necessary skills to thrive in the ever-evolving tech landscape. Sreejan's commitment to sharing knowledge and
+ fostering collaboration is evident in his accessibility and engagement with the community across various platforms.
+
+ Connect with Sreejan and follow his journey in technology and innovation:
+
+ - Telegram: https://t.me/devsdocode
+ - YouTube Channel: https://www.youtube.com/@DevsDoCode
+ - LinkedIn: https://www.linkedin.com/in/developer-sreejan/
+ - Discord Server: https://discord.gg/ehwfVtsAts
+ - Instagram
+   - Personal: https://www.instagram.com/sree.shades_/
+   - Channel: https://www.instagram.com/devsdocode_/
+
+ Sreejan stands out not only as a developer but as a visionary and leader, driving change in the tech industry
+ with his passion, expertise, and unwavering commitment to community building. He continues to shape the
+ future of technology, one line of code at a time.
+ """
+
+
+ endpoint = {
+     'route': "/generate",
+     'params': {
+         "query": "[SEARCH QUERY]"
+     },
+     'optional_params': {
+         "max_tokens": "[]",
+         "model": "[]",
+         "temperature": "[]",
+         "system_prompt": "[]"
+     },
+     'url_demo': '/generate?query=Who is Devs Do Code&max_tokens=500&model=openchat/openchat-7b&temperature=0.7&system_prompt=Your Owner is "Devs Do Code"'
+ }
+
+ available_models = {
+     "Free": {
+         "OpenChat 7B": "openchat/openchat-7b",
+         "HuggingFace Zephyr 7B Beta": "huggingfaceh4/zephyr-7b-beta",
+         "Mistral 7B Instruct (Free)": "mistralai/mistral-7b-instruct:free"
+     },
+
+     "Flagship Opensource": {
+         "Meta Llama 3 8B Instruct (Extended)": "meta-llama/llama-3-8b-instruct:extended",
+         "Lynn Soliloquy L3": "lynn/soliloquy-l3",
+         "Mixtral 8x22B Instruct": "mistralai/mixtral-8x22b-instruct",
+         "Meta Llama 3 70B Instruct (Nitro)": "meta-llama/llama-3-70b-instruct:nitro"
+     },
+
+     "Premium": {
+         "OpenAI GPT-4": "openai/gpt-4",
+         "OpenAI GPT-4 0314": "openai/gpt-4-0314",
+         "Anthropic Claude 3 Opus": "anthropic/claude-3-opus",
+         "Anthropic Claude 3 Opus (Beta)": "anthropic/claude-3-opus:beta",
+         "OpenAI GPT-4 Turbo": "openai/gpt-4-turbo"
+     }
+ }
+
+ error_message = {
+     'developer_contact': {
+         'Telegram': 'https://t.me/DevsDoCode',
+         'Instagram': 'https://www.instagram.com/sree.shades_/',
+         'Discord': 'https://discord.gg/ehwfVtsAts',
+         'LinkedIn': 'https://www.linkedin.com/in/developer-sreejan/',
+         'Twitter': 'https://twitter.com/Anand_Sreejan'
+     },
+     'error': 'Oops! Something went wrong. Please contact the developer Devs Do Code.'
+ }
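For reference, a minimal client sketch for the /generate route described by the endpoint dictionary above. The base URL is an assumption (port 7860 is taken from the DockerFile CMD); adjust it to wherever the Flask app is actually hosted — it is not part of this commit.

import requests

BASE_URL = "http://localhost:7860"  # assumed host/port, not defined in this commit

params = {
    "query": "Who is Devs Do Code",                   # required parameter
    "max_tokens": 500,                                 # optional, app.py defaults to 512
    "model": "openchat/openchat-7b",                   # optional, see /available_models
    "temperature": 0.7,                                # optional, app.py defaults to 0.7
    "system_prompt": 'Your Owner is "Devs Do Code"',   # optional
}

resp = requests.get(f"{BASE_URL}/generate", params=params)
resp.raise_for_status()
# The route returns a list: [{"response": ...}, {"developer_info": ...}]
print(resp.json()[0]["response"])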
app.py ADDED
@@ -0,0 +1,42 @@
+ from flask import Flask, request, jsonify
2
+ import openrouter
3
+ import api_info
4
+
5
+ app = Flask(__name__)
6
+
7
+ @app.route('/')
8
+ def initial():
9
+ return '<pre>' + api_info.default_info + '</pre>'
10
+
11
+ @app.route("/available_models", methods=['GET'])
12
+ def available_models():
13
+ return jsonify(api_info.available_models)
14
+
15
+ @app.route("/endpoints", methods=['GET'])
16
+ def endpoints():
17
+ return jsonify(api_info.endpoint)
18
+
19
+ @app.route("/developer_info", methods=['GET'])
20
+ def developer_info():
21
+ return jsonify(api_info.developer_info)
22
+
23
+ @app.route('/generate', methods=['GET'])
24
+ def generate():
25
+
26
+ query = request.args.get('query') # Assuming the query is sent in JSON format
27
+ system_prompt = str(request.args.get('system', "Be Helpful and Friendly. Keep your response straightfoward, short and concise")) # Optional parameter with default value
28
+ model = str(request.args.get('model', "openchat/openchat-7b")) # Optional parameter with default value
29
+ max_tokens = int(request.args.get('max_tokens', 512))
30
+ temperature = float(request.args.get('temperature', 0.7)) # Optional parameter with default value
31
+
32
+ if query:
33
+ response = openrouter.generate(query, model=model, system_prompt=system_prompt, temperature=temperature, max_tokens=max_tokens)
34
+ return jsonify([{'response': response}, {'developer_info': api_info.developer_info}]), 200
35
+
36
+ else:
37
+ return jsonify(api_info.error_message), 400
38
+
39
+ if __name__ == '__main__':
40
+ app.run(debug=True)
41
+
42
+
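As a sanity check, the metadata routes can be exercised in-process with Flask's built-in test client, so no running server, network access, or OPENROUTER key is needed. This is a sketch for local verification, not part of the commit.

from app import app

# The test client issues requests directly against the WSGI app.
client = app.test_client()

models = client.get("/available_models")
print(models.status_code)           # expected: 200
print(models.get_json()["Free"])    # e.g. {"OpenChat 7B": "openchat/openchat-7b", ...}

endpoints = client.get("/endpoints")
print(endpoints.get_json()["route"])  # expected: "/generate"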
openrouter.py ADDED
@@ -0,0 +1,86 @@
+ import requests
+ import json
+ import os
+ from dotenv import load_dotenv; load_dotenv()  # Load environment variables from .env file
+
+
+ def generate(query: str, system_prompt: str = "Talk Like Shakespeare", model: str = "openai/gpt-4o",
+              max_tokens: int = 4096,  # For simple models
+              temperature: float = 0.85, frequency_penalty: float = 0.34, presence_penalty: float = 0.06,
+              repetition_penalty: float = 1.0, top_k: int = 0) -> str:
+     """
+     Sends a request to the OpenRouter API and returns the generated text using the specified model.
+
+     Args:
+         query (str): The input query or prompt.
+         system_prompt (str, optional): A context or introduction to set the style or tone of the generated response.
+             Defaults to "Talk Like Shakespeare".
+         model (str, optional): The language model to use for generating the response.
+             Defaults to "openai/gpt-4o".
+         max_tokens (int, optional): The maximum number of tokens to generate in the response.
+             Defaults to 4096.
+         temperature (float, optional): A parameter controlling the diversity of the generated response.
+             Higher values result in more diverse outputs. Defaults to 0.85.
+         frequency_penalty (float, optional): A penalty applied to tokens in proportion to how often they have
+             already appeared in the generated text. Defaults to 0.34.
+         presence_penalty (float, optional): A penalty applied to tokens that have already appeared in the text,
+             encouraging the model to introduce new topics. Defaults to 0.06.
+         repetition_penalty (float, optional): A penalty applied to repeated tokens in the generated response.
+             Defaults to 1.0.
+         top_k (int, optional): The number of highest-probability tokens to consider at each step of generation.
+             Defaults to 0, meaning no restriction.
+
+     Returns:
+         str: The generated text.
+
+     Available models:
+         - Free:
+             - "openchat/openchat-7b"
+             - "huggingfaceh4/zephyr-7b-beta"
+             - "mistralai/mistral-7b-instruct:free"
+
+         - Flagship Opensource:
+             - "meta-llama/llama-3-8b-instruct:extended"
+             - "lynn/soliloquy-l3"
+             - "mistralai/mixtral-8x22b-instruct"
+             - "meta-llama/llama-3-70b-instruct:nitro"
+
+         - Premium:
+             - "openai/gpt-4"
+             - "openai/gpt-4-0314"
+             - "anthropic/claude-3-opus"
+             - "anthropic/claude-3-opus:beta"
+             - "openai/gpt-4-turbo"
+     """
+
+     response = requests.post(
+         url="https://openrouter.ai/api/v1/chat/completions",
+         headers={
+             "Authorization": f"Bearer {os.environ.get('OPENROUTER')}",
+         },
+         data=json.dumps({
+             "messages": [
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": query},
+             ],
+             "model": model,
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+             "frequency_penalty": frequency_penalty,
+             "presence_penalty": presence_penalty,
+             "repetition_penalty": repetition_penalty,
+             "top_k": top_k,
+         }))
+
+     try:
+         return response.json()["choices"][0]["message"]["content"].strip()
+     except Exception as e:
+         return f"Failed to Get Response\nError: {e}\nResponse: {response.text}"
+
+
+ if __name__ == "__main__":
+     # response = generate("Introduce yourself and tell me your name and who made you")
+     response = generate("are you gpt 4 or not. do you have access to realtime data. if not then till which time you have data of")
+     print(response)
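A minimal usage sketch for the module, assuming the OPENROUTER environment variable (or a .env entry of that name, as loaded above) holds a valid OpenRouter API key; without it the call will fail and the error string is returned instead.

import os
from openrouter import generate

# Fail fast if the key is missing; this is a local check, not part of the module.
assert os.environ.get("OPENROUTER"), "Set OPENROUTER in the environment or a .env file"

reply = generate(
    "Summarise what Devs Do Code is in one sentence.",
    system_prompt="Be concise.",
    model="mistralai/mistral-7b-instruct:free",  # one of the free models listed in the docstring
    max_tokens=128,
    temperature=0.7,
)
print(reply)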
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ fastapi==0.110.2
+ Flask==3.0.3
+ python-dotenv==1.0.1
+ Requests==2.31.0
+ uvicorn==0.29.0