awacke1 committed
Commit 671865a (verified) · 1 Parent(s): abcc7b3

Create app.py

Files changed (1)
  1. app.py +101 -0
app.py ADDED
@@ -0,0 +1,101 @@
+ from typing import List, Dict
+ import gradio as gr
+ import pandas as pd
+ from huggingface_hub import HfApi, ModelCard
+
+ def search_hub(query: str, search_type: str) -> pd.DataFrame:
+     """Search the Hub for models, datasets, or Spaces and return the matches as a DataFrame."""
+     api = HfApi()
+     if search_type == "Models":
+         results = api.list_models(search=query)
+         data = [{"id": model.modelId, "author": model.author, "downloads": model.downloads} for model in results]
+     elif search_type == "Datasets":
+         results = api.list_datasets(search=query)
+         data = [{"id": dataset.id, "author": dataset.author, "downloads": dataset.downloads} for dataset in results]
+     elif search_type == "Spaces":
+         results = api.list_spaces(search=query)
+         data = [{"id": space.id, "author": space.author} for space in results]
+     else:
+         data = []
+     return pd.DataFrame(data)
+
+ def open_url(df: pd.DataFrame, search_type: str, evt: gr.SelectData):
+     """Build an HTML link to the Hub page of the row the user clicked."""
+     if df is None or df.empty:
+         return ""
+     item_id = df.iloc[evt.index[0]]["id"]
+     # Datasets and Spaces live under their own URL prefixes on the Hub.
+     prefix = {"Datasets": "datasets/", "Spaces": "spaces/"}.get(search_type, "")
+     url = f"https://huggingface.co/{prefix}{item_id}"
+     return f'<a href="{url}" target="_blank">{url}</a>'
+
+ def load_metadata(df: pd.DataFrame, search_type: str, evt: gr.SelectData):
+     """Load the model card or repo metadata for the row the user clicked."""
+     if df is None or df.empty:
+         return ""
+     item_id = df.iloc[evt.index[0]]["id"]
+     api = HfApi()
+     try:
+         if search_type == "Models":
+             return ModelCard.load(item_id).text
+         elif search_type == "Datasets":
+             return str(api.dataset_info(item_id))
+         elif search_type == "Spaces":
+             return str(api.space_info(item_id))
+         else:
+             return ""
+     except Exception as e:
+         return f"Error loading metadata: {str(e)}"
+
+ def SwarmyTime(data: List[Dict], search_type: str) -> Dict:
+     """
+     Aggregates all content from the given search results.
+
+     :param data: List of dictionaries containing the search results
+     :param search_type: The type that was searched ("Models", "Datasets", or "Spaces")
+     :return: Dictionary with aggregated content
+     """
+     aggregated = {
+         "total_items": len(data),
+         "unique_authors": set(),
+         "total_downloads": 0,
+         "item_types": {"Models": 0, "Datasets": 0, "Spaces": 0}
+     }
+
+     for item in data:
+         aggregated["unique_authors"].add(item.get("author", "Unknown"))
+         aggregated["total_downloads"] += item.get("downloads", 0) or 0
+         # All rows of a single search share the same type, so count them under it.
+         if search_type in aggregated["item_types"]:
+             aggregated["item_types"][search_type] += 1
+
+     aggregated["unique_authors"] = len(aggregated["unique_authors"])
+
+     return aggregated
+
+ with gr.Blocks() as demo:
+     gr.Markdown("## Search the Hugging Face Hub")
+     with gr.Row():
+         search_query = gr.Textbox(label="Search Query")
+         search_type = gr.Radio(["Models", "Datasets", "Spaces"], label="Search Type", value="Models")
+     search_button = gr.Button("Search")
+     results_df = gr.DataFrame(label="Search Results", wrap=True, interactive=True)
+     url_output = gr.HTML(label="URL")
+     metadata_output = gr.Textbox(label="Metadata", lines=10)
+     aggregated_output = gr.JSON(label="Aggregated Content")
+
+     def search_and_aggregate(query, search_type):
+         df = search_hub(query, search_type)
+         aggregated = SwarmyTime(df.to_dict("records"), search_type)
+         return df, aggregated
+
+     search_button.click(search_and_aggregate, inputs=[search_query, search_type], outputs=[results_df, aggregated_output])
+     results_df.select(open_url, inputs=[results_df, search_type], outputs=[url_output])
+     results_df.select(load_metadata, inputs=[results_df, search_type], outputs=[metadata_output])
+
+ demo.launch(debug=True)
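
To sanity-check the Hub calls that search_hub relies on without launching the Gradio UI, a console snippet along these lines should work (a minimal sketch: it assumes network access to the Hub, and the query "bert" and limit=5 are illustrative values, not part of the app above):

from huggingface_hub import HfApi
import pandas as pd

# Reproduce the Models branch of search_hub for a quick field-name check.
api = HfApi()
rows = [{"id": m.modelId, "author": m.author, "downloads": m.downloads}
        for m in api.list_models(search="bert", limit=5)]
print(pd.DataFrame(rows))

The same pattern applies to api.list_datasets and api.list_spaces, except that Space results carry no downloads field, which is why SwarmyTime falls back to 0 for that key.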