akhaliq HF staff committed on
Commit
70db785
·
1 Parent(s): 1acb26d

add new NVIDIA models

Browse files
Files changed (4) hide show
  1. app.py +2 -2
  2. app_nvidia.py +14 -44
  3. pyproject.toml +1 -1
  4. requirements.txt +1 -1
app.py CHANGED
@@ -7,7 +7,6 @@ from app_fal import demo as demo_fal
7
  from app_fireworks import demo as demo_fireworks
8
  from app_huggingface import demo as demo_huggingface
9
  from app_meta import demo as demo_meta
10
- from app_nvidia import demo as demo_nvidia
11
  from app_omini import demo as demo_omini
12
  from app_paligemma import demo as demo_paligemma
13
  from app_perplexity import demo as demo_perplexity
@@ -36,11 +35,13 @@ from app_langchain import demo as demo_langchain
36
  from app_mistral import demo as demo_mistral
37
  from app_minimax import demo as demo_minimax
38
  from app_minimax_coder import demo as demo_minimax_coder
 
39
  from utils import get_app
40
 
41
  # Create mapping of providers to their demos
42
  PROVIDERS = {
43
  "Minimax Coder": demo_minimax_coder,
 
44
  "Minimax": demo_minimax,
45
  "Gemini Camera": demo_gemini_camera,
46
  "Mistral": demo_mistral,
@@ -77,7 +78,6 @@ PROVIDERS = {
77
  "Allen AI": demo_allenai,
78
  "Perplexity": demo_perplexity,
79
  "Experimental": demo_experimental,
80
- "NVIDIA": demo_nvidia,
81
  }
82
 
83
  demo = get_app(
 
7
  from app_fireworks import demo as demo_fireworks
8
  from app_huggingface import demo as demo_huggingface
9
  from app_meta import demo as demo_meta
 
10
  from app_omini import demo as demo_omini
11
  from app_paligemma import demo as demo_paligemma
12
  from app_perplexity import demo as demo_perplexity
 
35
  from app_mistral import demo as demo_mistral
36
  from app_minimax import demo as demo_minimax
37
  from app_minimax_coder import demo as demo_minimax_coder
38
+ from app_nvidia import demo as demo_nvidia
39
  from utils import get_app
40
 
41
  # Create mapping of providers to their demos
42
  PROVIDERS = {
43
  "Minimax Coder": demo_minimax_coder,
44
+ "NVIDIA": demo_nvidia,
45
  "Minimax": demo_minimax,
46
  "Gemini Camera": demo_gemini_camera,
47
  "Mistral": demo_mistral,
 
78
  "Allen AI": demo_allenai,
79
  "Perplexity": demo_perplexity,
80
  "Experimental": demo_experimental,
 
81
  }
82
 
83
  demo = get_app(
app_nvidia.py CHANGED
@@ -1,51 +1,21 @@
1
- import os
2
 
3
- import nvidia_gradio
4
 
5
- from utils import get_app
 
6
 
 
 
 
 
 
7
  demo = get_app(
8
- models=[
9
- "nvidia/llama3-chatqa-1.5-70b",
10
- "nvidia/llama3-chatqa-1.5-8b",
11
- "nvidia-nemotron-4-340b-instruct",
12
- "meta/llama-3.1-70b-instruct",
13
- "meta/codellama-70b",
14
- "meta/llama2-70b",
15
- "meta/llama3-8b",
16
- "meta/llama3-70b",
17
- "mistralai/codestral-22b-instruct-v0.1",
18
- "mistralai/mathstral-7b-v0.1",
19
- "mistralai/mistral-large-2-instruct",
20
- "mistralai/mistral-7b-instruct",
21
- "mistralai/mistral-7b-instruct-v0.3",
22
- "mistralai/mixtral-8x7b-instruct",
23
- "mistralai/mixtral-8x22b-instruct",
24
- "mistralai/mistral-large",
25
- "google/gemma-2b",
26
- "google/gemma-7b",
27
- "google/gemma-2-2b-it",
28
- "google/gemma-2-9b-it",
29
- "google/gemma-2-27b-it",
30
- "google/codegemma-1.1-7b",
31
- "google/codegemma-7b",
32
- "google/recurrentgemma-2b",
33
- "google/shieldgemma-9b",
34
- "microsoft/phi-3-medium-128k-instruct",
35
- "microsoft/phi-3-medium-4k-instruct",
36
- "microsoft/phi-3-mini-128k-instruct",
37
- "microsoft/phi-3-mini-4k-instruct",
38
- "microsoft/phi-3-small-128k-instruct",
39
- "microsoft/phi-3-small-8k-instruct",
40
- "qwen/qwen2-7b-instruct",
41
- "databricks/dbrx-instruct",
42
- "deepseek-ai/deepseek-coder-6.7b-instruct",
43
- "upstage/solar-10.7b-instruct",
44
- "snowflake/arctic",
45
- ],
46
- default_model="meta/llama-3.1-70b-instruct",
47
- src=nvidia_gradio.registry,
48
- accept_token=not os.getenv("NVIDIA_API_KEY"),
49
  )
50
 
51
  if __name__ == "__main__":
 
1
+ import ai_gradio
2
 
3
+ from utils_ai_gradio import get_app
4
 
5
+ # Get the nvidia models but keep their full names for loading
6
+ NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
7
 
8
+ # Create display names without the prefix
9
+ NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
  demo = get_app(
14
+ models=NVIDIA_MODELS_FULL, # Use the full names with prefix
15
+ default_model=NVIDIA_MODELS_FULL[0],
16
+ dropdown_label="Select Nvidia Model",
17
+ choices=NVIDIA_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  )
20
 
21
  if __name__ == "__main__":
pyproject.toml CHANGED
@@ -38,7 +38,7 @@ dependencies = [
38
  "langchain>=0.3.14",
39
  "chromadb>=0.5.23",
40
  "openai>=1.55.0",
41
- "ai-gradio[crewai,deepseek,gemini,groq,hyperbolic,openai,smolagents,transformers, langchain, mistral,minimax]>=0.2.18",
42
  ]
43
 
44
  [tool.uv.sources]
 
38
  "langchain>=0.3.14",
39
  "chromadb>=0.5.23",
40
  "openai>=1.55.0",
41
+ "ai-gradio[crewai,deepseek,gemini,groq,hyperbolic,openai,smolagents,transformers, langchain, mistral,minimax,nvidia]>=0.2.19",
42
  ]
43
 
44
  [tool.uv.sources]
requirements.txt CHANGED
@@ -2,7 +2,7 @@
2
  # uv pip compile pyproject.toml -o requirements.txt
3
  accelerate==1.2.1
4
  # via ai-gradio
5
- ai-gradio==0.2.18
6
  # via anychat (pyproject.toml)
7
  aiofiles==23.2.1
8
  # via gradio
 
2
  # uv pip compile pyproject.toml -o requirements.txt
3
  accelerate==1.2.1
4
  # via ai-gradio
5
+ ai-gradio==0.2.19
6
  # via anychat (pyproject.toml)
7
  aiofiles==23.2.1
8
  # via gradio