# 自良 — update log server (commit 0ce740f)
import os
import json
import datetime
import requests
import numpy as np
import gradio as gr
from pathlib import Path
from model.model_registry import *
from .constants import LOGDIR, LOG_SERVER_ADDR, APPEND_JSON, SAVE_IMAGE, SAVE_VIDEO, SAVE_LOG
from typing import Union
# Shared gr.update payloads used as button-state presets by the UI handlers below.
enable_btn = gr.update(visible=True, interactive=True)        # show + clickable
disable_btn = gr.update(interactive=False)                    # grey out; visibility untouched
invisible_btn = gr.update(visible=False, interactive=False)   # hide entirely
# NOTE(review): this preset also rewrites the button label to "No Change" —
# confirm that relabeling is intended wherever it is applied.
no_change_btn = gr.update(value="No Change", interactive=True, visible=True)
def build_about():
    """Render the static "About Us" section into the active Gradio Blocks context."""
    content = """
# About Us
This is a project from Tongyi Lab.
## Contributors:
Chen Liang, Lianghua Huang, Jingwu Fang, Huanzhang Dou, Wei Wang, Zhi-Fan Wu, Yupeng Shi, Junge Zhang, Xin Zhao, Yu Liu.
## Contact:
Email: [email protected] (Chen Liang)
## Sponsorship
We are keep looking for sponsorship to support the arena project for the long term. Please contact us if you are interested in supporting this project.
## Acknowledgment
Our codebase is built upon <a href="https://github.com/lm-sys/FastChat" target="_blank">FastChat</a> and <a href="https://huggingface.co/spaces/TIGER-Lab/GenAI-Arena" target="_blank">GenAI-Arena</a>.
"""
    # elem_id matches the #about_markdown rule in `block_css`.
    gr.Markdown(content, elem_id="about_markdown")
# Markdown crediting the upstream projects this codebase builds on.
# NOTE(review): the consumer is not visible in this chunk; the #ack_markdown
# rule in `block_css` suggests it is rendered with elem_id="ack_markdown" — confirm.
acknowledgment_md = """
### Acknowledgment
<div class="image-container">
<p> Our codebase is built upon <a href="https://github.com/lm-sys/FastChat" target="_blank">FastChat</a> and <a href="https://huggingface.co/spaces/TIGER-Lab/GenAI-Arena" target="_blank">GenAI-Arena</a>.</p>
</div>
"""
# Stylesheet injected into the Gradio app.
# Fix: in the original, the `.input-image, .image-preview` rule was nested
# inside `.image-about img` (missing closing brace), which plain CSS does not
# support — the inner rule would be dropped by browsers without CSS nesting.
# The `.image-about img` rule is now properly closed and the image rule is a
# top-level selector.
block_css = """
#notice_markdown {
font-size: 110%
}
#notice_markdown th {
display: none;
}
#notice_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#model_description_markdown {
font-size: 110%
}
#leaderboard_markdown {
font-size: 110%
}
#leaderboard_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#leaderboard_dataframe td {
line-height: 0.1em;
}
#about_markdown {
font-size: 110%
}
#ack_markdown {
font-size: 110%
}
#input_box textarea {
}
#prompt {
background-color: #F5F5F5;
padding: 16px;
border: 1px solid #D1D1D1;
font-size: 16px;
color: #000000;
}
footer {
display:none !important
}
.image-about img {
margin: 0 30px;
margin-top: 30px;
height: 60px;
max-height: 100%;
width: auto;
float: left;
}
.input-image, .image-preview {
margin: 0 30px;
height: 30px;
max-height: 100%;
width: auto;
max-width: 30%;
}
.vote-btn {
background-color: #F5F5F5;
border-radius: 8px;
color: #000000;
font-size: 16px;
font-weight: bold;
display: flex;
justify-content: center;
align-items: center;
margin: 5px;
width: auto;
}
.vote-btn:hover {
background-color: #E1E1E1;
opacity: 0.9;
cursor: pointer;
}
.vote-btn:active {
background-color: #D1D1D1;
opacity: 1;
}
.btn-row {
background-color: white !important;
}
"""
def enable_buttons_side_by_side():
    """Return six gr.update payloads making the side-by-side buttons visible and clickable."""
    updates = [gr.update(visible=True, interactive=True) for _ in range(6)]
    return tuple(updates)
def disable_buttons_side_by_side():
    """Disable all six side-by-side buttons; only the last two remain visible."""
    updates = []
    for idx in range(6):
        # Buttons 0-3 are hidden while disabled; 4 and 5 stay on screen.
        updates.append(gr.update(visible=idx >= 4, interactive=False))
    return tuple(updates)
def enable_buttons():
    """Return five gr.update payloads re-enabling a row of buttons."""
    return tuple([gr.update(interactive=True)] * 5)
def disable_buttons():
    """Return five gr.update payloads greying out a row of buttons."""
    return tuple([gr.update(interactive=False)] * 5)
def clear_history():
    """Reset the single-model view: state, prompt textbox, and output slot.

    NOTE(review): slot meanings inferred from the sibling clear_* helpers —
    confirm against the caller's output wiring.
    """
    cleared_state = None
    empty_prompt = ""
    cleared_output = None
    return cleared_state, empty_prompt, cleared_output
def clear_history_side_by_side():
    """Reset the side-by-side view: both states, the prompt box, and both outputs."""
    cleared = (None, None, "", None, None)
    return cleared
def clear_history_side_by_side_anony():
    """Reset the anonymous side-by-side view and hide both model-name banners."""
    # Two distinct hidden Markdown components — one per model column.
    banner_a = gr.Markdown("", visible=False)
    banner_b = gr.Markdown("", visible=False)
    return None, None, "", None, None, banner_a, banner_b
def clear_history_ie():
    """Reset the image-editing view: state, three text fields, and two image slots."""
    text_fields = ("", "", "")
    return (None,) + text_fields + (None, None)
def clear_history_side_by_side_ie():
    """Reset the side-by-side image-editing view: two states, three text fields, three image slots."""
    return (None, None) + ("", "", "") + (None, None, None)
def clear_history_side_by_side_ie_anony():
    """Reset the anonymous side-by-side image-editing view and hide both name banners."""
    banner_a = gr.Markdown("", visible=False)
    banner_b = gr.Markdown("", visible=False)
    return (None, None) + ("", "", "") + (None, None, None) + (banner_a, banner_b)
def refresh_side_by_side(models, model_name_A, model_name_B):
    """Fetch a random cached comparison for the two named models and repopulate the UI.

    NOTE(review): output-slot meanings inferred from names — confirm against caller wiring.
    """
    (state0, state1, prompt,
     input_images, out_a, out_b) = models.get_result_of_random_case(model_name_A, model_name_B)
    # Re-enable the three vote buttons and echo the model names back to the UI;
    # the two trailing "" clear auxiliary text fields.
    return (state0, state1, prompt, input_images, out_a, out_b,
            enable_btn, enable_btn, enable_btn,
            model_name_A, model_name_B, "", "")
def refresh_side_by_side_anony(models):
    """Draw a random anonymous comparison pair and repopulate the side-by-side UI.

    NOTE(review): output-slot meanings inferred from the sibling refresh helper — confirm wiring.
    """
    (state0, state1, prompt,
     input_images, out_a, out_b) = models.get_result_of_random_case_anony()
    # Re-enable the three vote buttons; the two trailing "" clear auxiliary text fields.
    return (state0, state1, prompt, input_images, out_a, out_b,
            enable_btn, enable_btn, enable_btn, "", "")
def get_ip(request: gr.Request):
    """Best-effort client IP for a Gradio request, or None when no request is given."""
    if not request:
        return None
    # Behind Cloudflare the real client IP arrives in this header; fall back to
    # the socket peer when the header is missing or empty.
    if "cf-connecting-ip" in request.headers:
        return request.headers["cf-connecting-ip"] or request.client.host
    return request.client.host
def get_conv_log_filename():
    """Return today's conversation-log file name, e.g. ``2024-05-03-conv.json``."""
    return datetime.datetime.now().strftime("%Y-%m-%d-conv.json")
def save_image_file_on_log_server(image_file: str):
    """POST a local image to the log server, keyed by its path relative to CWD.

    Returns the ``requests.Response`` from the server.
    """
    rel_path = str(Path(image_file).absolute().relative_to(os.getcwd()))
    endpoint = f"{LOG_SERVER_ADDR}/{SAVE_IMAGE}"
    # Send the binary image as multipart data together with the relative path
    # it should be stored under on the server.
    with open(rel_path, 'rb') as fh:
        return requests.post(endpoint, files={'image': fh}, data={'image_path': rel_path})
def append_json_item_on_log_server(json_item: Union[dict, str], log_file: str):
    """Append one JSON record to *log_file* on the remote log server.

    Accepts either a dict (serialized here) or an already-serialized JSON string.
    Returns the ``requests.Response`` from the server.
    """
    payload = json.dumps(json_item) if isinstance(json_item, dict) else json_item
    rel_log = str(Path(log_file).absolute().relative_to(os.getcwd()))
    return requests.post(
        f"{LOG_SERVER_ADDR}/{APPEND_JSON}",
        data={'json_str': payload, 'file_name': rel_log},
    )
def save_log_str_on_log_server(log_str: str, log_file: str):
    """Send a log message to the remote log server for storage in *log_file*.

    Returns the ``requests.Response`` from the server.
    """
    rel_log = str(Path(log_file).absolute().relative_to(os.getcwd()))
    return requests.post(
        f"{LOG_SERVER_ADDR}/{SAVE_LOG}",
        data={'message': log_str, 'log_path': rel_log},
    )