Spaces:
Runtime error
Runtime error
File size: 2,324 Bytes
d521aaf b58be1d d521aaf dc74d38 d521aaf dc74d38 63c889a dc74d38 b58be1d d521aaf dc74d38 d521aaf dc74d38 d521aaf dc74d38 d521aaf dc74d38 d521aaf |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 |
#### INSTALL LIB
import subprocess
import os

# GitHub token used to clone the private recommender repository.
token = os.environ.get("GITHUB_TOKEN", None)
if not token:
    raise ValueError("Token not found")
# Build the install command as an argument list and run without a shell:
# interpolating the secret token into a shell string (shell=True) risks
# leaking it through shell history/logs and allows command injection if
# the value is ever attacker-controlled.
command = [
    "pip",
    "install",
    f"git+https://x-access-token:{token}:[email protected]/philschmid/model-recommender.git",
]
subprocess.run(command, check=True)
#### INSTALL LIB
#### INSTALL LIB
from dataclasses import asdict
import json
import gradio as gr
from recommender.main import get_tgi_config
def greet(model_id, gpu_memory, num_gpus):
    """Build a TGI config for *model_id* and return it as a JSON string.

    On any recommender failure (or when no config can be produced) a JSON
    object with an "error" key is returned instead, so the UI always gets
    valid JSON to render.
    """
    try:
        config = get_tgi_config(model_id, gpu_memory, num_gpus)
    except Exception as exc:  # surface recommender errors to the UI
        return json.dumps({"error": str(exc)})
    if config is None:
        return json.dumps({"error": f"Couldn't generate TGI config for {model_id}"})
    return json.dumps(asdict(config))
# Shared Monochrome theme for the demo UI: indigo/blue accents on a slate
# neutral palette, small corner radius, Open Sans with system fallbacks.
theme = gr.themes.Monochrome(
    font=[
        gr.themes.GoogleFont("Open Sans"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
)
# HTML blurb rendered above the Gradio form. Fixes the user-facing typo
# "Provide you model ID" -> "Provide your model ID".
DESCRIPTION = """
<div style="text-align: center; max-width: 650px; margin: 0 auto; display:grid; gap:25px;">
<h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
Hugging Face TGI Configuration Creator
</h1>
<p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
This Space helps you generate and validate Hugging Face TGI configurations for your model. Provide your model ID and the amount of GPU memory you have available and we will generate a configuration for you, which you can use to run your model on TGI.
</p>
</div>
"""
# Wire the recommender into a simple Gradio form: a model ID textbox plus
# two sliders in, the generated TGI config (or an error object) out as JSON.
demo = gr.Interface(
    fn=greet,
    inputs=[
        gr.Textbox(label="Model ID", placeholder="meta-llama/Llama-2-7b-chat-hf"),
        gr.Slider(
            minimum=16,
            maximum=640,
            step=4,
            value=24,
            label="GPU memory",
            info="Select how much GPU memory you have available",
        ),
        gr.Slider(
            minimum=1,
            maximum=8,
            step=1,
            value=1,
            label="# of GPUs",
            info="Select how many GPUs you have available",
        ),
    ],
    outputs=[gr.JSON()],
    description=DESCRIPTION,
    theme=theme,
)
demo.launch()
|