Sachin committed · Commit 7750a2f
Parent(s): 3990f3a

update with env

Files changed:
- .gitignore +2 -0
- app.py +90 -4
.gitignore ADDED
@@ -0,0 +1,2 @@
+.env
+.gradio
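The commit message and the new .gitignore entries indicate the Space reads its configuration from a local .env file, loaded via python-dotenv in app.py below. A minimal sketch of that file, assuming only the two variables app.py reads; both values are placeholders, not real endpoints or keys:

    SERVER_URL=http://localhost:8000
    OPEN_AI_KEY=sk-your-key-here

With such a file in place, the app runs locally with python app.py after installing gradio, python-dotenv, and requests.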
app.py CHANGED
@@ -1,7 +1,93 @@
 import gradio as gr
+import os
+from dotenv import load_dotenv
+import requests
+import json
 
-def greet(name):
-    return "Hello " + name + "!!"
+load_dotenv()
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
+SERVER_URL = os.getenv('SERVER_URL')
+OPEN_AI_KEY = os.getenv('OPEN_AI_KEY')
+
+examples = [
+
+    'black your game sucks',
+    'suuuuuuuk my d',
+    "red you're a 💩"
+]
+
+
+def get_openai_tox_score(text):
+    url = "https://api.openai.com/v1/moderations"
+
+    payload = json.dumps({
+        "model": "omni-moderation-latest",
+        "input": text
+    })
+    headers = {
+        'Authorization': f'Bearer {OPEN_AI_KEY}',
+        'Content-Type': 'application/json',
+    }
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+    data = response.json()
+    if data['results'][0]['flagged']:
+        return "This content has been flagged as potentially toxic. 🚨"
+    return "This content appears to be safe. ✅"
+
+def get_tox_score(text):
+    url = f"{SERVER_URL}/api/analyzer/toxscore"
+
+    payload = json.dumps({
+        "text": text
+    })
+    headers = {
+        'Content-Type': 'application/json',
+    }
+    try:
+        response = requests.request("POST", url, headers=headers, data=payload)
+        open_ai_response = get_openai_tox_score(text)
+        if response.json()['flagged']:
+            return "This content has been flagged as potentially toxic. 🚨", open_ai_response
+        return "This content appears to be safe. ✅", open_ai_response
+
+    except Exception as e:
+        print(e)
+        return "Error Occurred", "Error Occurred"
+
+
+with gr.Blocks() as demo:
+
+    gr.HTML(
+        f"""
+        <div style="text-align: center; margin-bottom: 20px;">
+            <div style="display: flex; justify-content: center;">
+                <img src="https://raw.githubusercontent.com/showlab/ShowUI/refs/heads/main/assets/showui.jpg" alt="ShowUI" width="320" style="margin-bottom: 10px;"/>
+            </div>
+            <p>ShowUI is a lightweight vision-language-action model for GUI agents.</p>
+            <div style="display: flex; justify-content: center; gap: 15px; font-size: 20px;">
+                <a href="https://huggingface.co/showlab/ShowUI-2B" target="_blank">
+                    <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-ShowUI--2B-blue" alt="model"/>
+                </a>
+                <a href="https://arxiv.org/abs/2411.17465" target="_blank">
+                    <img src="https://img.shields.io/badge/arXiv%20paper-2411.17465-b31b1b.svg" alt="arXiv"/>
+                </a>
+                <a href="https://github.com/showlab/ShowUI" target="_blank">
+                    <img src="https://img.shields.io/badge/GitHub-ShowUI-black" alt="GitHub"/>
+                </a>
+            </div>
+        </div>
+        """
+    )
+
+
+    message_input = gr.Textbox(label="Chat Message", placeholder="Type your message here...")
+    check_button = gr.Button("Check Toxicity")
+    with gr.Row():
+        toxicity_output = gr.Textbox(label="Toxicity Result CompaniAI", interactive=False)
+        toxicity_openAI_output = gr.Textbox(label="Toxicity Result OpenAI", interactive=False)
+
+
+    check_button.click(get_tox_score, inputs=message_input, outputs=[toxicity_output,toxicity_openAI_output])
+
+demo.launch()
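A note on the response shapes this code assumes: get_tox_score expects the CompaniAI endpoint to return a JSON object with a top-level boolean flagged field (inferred from response.json()['flagged']), while get_openai_tox_score reads results[0].flagged, which matches OpenAI's documented moderation response. A minimal, network-free sketch that exercises the same branching logic against stubbed payloads; the stub values and helper names are hypothetical, not part of the commit:

    # Stub payloads mirroring the two response shapes app.py parses.
    companiai_stub = {"flagged": True}               # assumed CompaniAI shape
    openai_stub = {"results": [{"flagged": False}]}  # OpenAI moderation shape

    def companiai_verdict(data):
        # Same branch get_tox_score takes after response.json().
        return "toxic" if data["flagged"] else "safe"

    def openai_verdict(data):
        # Same branch get_openai_tox_score takes on the parsed response.
        return "toxic" if data["results"][0]["flagged"] else "safe"

    assert companiai_verdict(companiai_stub) == "toxic"
    assert openai_verdict(openai_stub) == "safe"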