import csv
import datasets
import io
import json
import pandas as pd
from diffusion_bias_utils import *  # wildcard import; torch, np, Image, and make_2d_plot used below are expected to come from here
from scipy.cluster.hierarchy import fcluster
_DATA_PATH = "../Data/diffusion_bias/"
# Faster version: load pre-computed CLIP embeddings
id_img_list = torch.load("identities_images_embeddings_CLIP_all.th")
# add "Asian", which appears in the VQA model's outputs but not in the generation prompts
ethnicities = sorted(set(dct["ethnicity"] for dct in id_img_list)) + ["Asian"]
prof_img_list_sd14 = torch.load("professions_images_embeddings_CLIP_SD14.th")
prof_img_list_sd2 = torch.load("professions_images_embeddings_CLIP_SD2.th")
prof_img_list_dalle = torch.load("professions_images_embeddings_CLIP_dalle.th")
ethnicities_ordered = [
"",
"Caucasian",
"White",
"Multiracial",
"Black",
"African-American",
"Asian",
"South Asian",
"Southeast Asian",
"East Asian",
"Pacific Islander",
"Latino",
"Latinx",
"Hispanic",
"First Nations",
"Native American",
"American Indian",
"Indigenous American",
]
ethnicities_predicted = sorted(set(ans for dct in id_img_list for ans in dct["blip_question_ethnicity_answers"]))
ethnicities_predicted_ordered = [
"caucasian",
"white",
"black",
"asian",
"latino",
"native american",
]
# sanity checks: the hand-ordered lists must cover exactly the observed labels
assert len(ethnicities_predicted_ordered) == len(ethnicities_predicted)
assert len(ethnicities_ordered) == len(ethnicities)
# https://plotly.com/python/marker-style/
gender_shapes = {
"man_SD_14": "square-x",
"woman_SD_14": "diamond-x",
"person_SD_14": "circle-x",
"man_SD_2": "square-cross",
"woman_SD_2": "diamond-cross",
"person_SD_2": "circle-cross",
"man_DallE": "square",
"woman_DallE": "diamond",
"person_DallE": "circle",
}
id_shape_list = [
gender_shapes[dct["gender"] + "_" + dct["model"]]
for dct in id_img_list for img in dct["images"]
]
id_color_list = [
ethnicities.index(dct["ethnicity"])
for dct in id_img_list for img in dct["images"]
]
id_text_list = [
dct["text"]
for dct in id_img_list for img in dct["images"]
]
for w in ["gender", "ethnicity", "profession", "appearance"]:
    print(f"##### Making picture for {w} question embeddings")
    id_question_embeds = np.array([
        embed
        for dct in id_img_list for embed in dct[f"blip_question_{w}_embeddings"]
    ])
    id_text_question_list = [
        dct["text"] + f" | predicted {w}: {answer} | {i}-{j}"
        for i, dct in enumerate(id_img_list) for j, answer in enumerate(dct[f"blip_question_{w}_answers"])
    ]
    text_plot_2d = make_2d_plot(id_question_embeds, id_text_question_list, id_color_list, id_shape_list, umap_spread=50)
    text_plot_2d.show()
##### Making picture for gender question embeddings
##### Making picture for ethnicity question embeddings
##### Making picture for profession question embeddings
##### Making picture for appearance question embeddings
(each call also emits a benign UMAP RuntimeWarning from umap_.py:1356: "divide by zero encountered in power")
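make_2d_plot comes from diffusion_bias_utils via the wildcard import above. A rough, hypothetical sketch of what such a helper might do, assuming umap-learn and plotly (the real function's signature and styling may differ):

# Hypothetical sketch of a make_2d_plot-style helper (the real one lives in
# diffusion_bias_utils): project embeddings to 2D with UMAP, then scatter-plot
# them with per-point hover text, color (ethnicity index), and marker shape.
import numpy as np
import plotly.graph_objects as go
import umap

def make_2d_plot_sketch(embeds, texts, colors, shapes, umap_spread=50):
    proj = umap.UMAP(n_components=2, spread=umap_spread).fit_transform(np.asarray(embeds))
    return go.Figure(
        go.Scatter(
            x=proj[:, 0],
            y=proj[:, 1],
            mode="markers",
            text=texts,  # hover labels: prompt + predicted answer
            marker=dict(color=colors, symbol=shapes, colorscale="Rainbow"),
        )
    )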
all_clusters = torch.load("clusters_12_24_48.th")
clusters_12 = all_clusters[12]
clusters_24 = all_clusters[24]
clusters_48 = all_clusters[48]
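The cluster file bundles assignments at three granularities, keyed by cluster count. The fcluster import at the top hints at how such a file could have been produced; a hypothetical reconstruction with scipy's hierarchical clustering (the actual pipeline, and the extra label fields stored in each cluster dict, may differ):

# Hypothetical sketch of building a clusters_12_24_48-style structure from the
# appearance embeddings, using Ward linkage + fcluster at three granularities.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

def build_clusters(reps, n_clusters_list=(12, 24, 48)):
    reps = np.asarray(reps)
    Z = linkage(reps, method="ward")  # agglomerative tree over the embeddings
    out = {}
    for n in n_clusters_list:
        labels = fcluster(Z, t=n, criterion="maxclust")  # labels are 1..n
        out[n] = [
            {
                "cluster_id": c - 1,  # shift to the 0-based ids used below
                "ids": np.where(labels == c)[0].tolist(),
                "centroid": reps[labels == c].mean(axis=0),
            }
            for c in range(1, n + 1)
        ]
    return out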
w = "appearance"
_FIELD_NAME = f"blip_question_{w}_embeddings"
id_img_path_list = [img_path for dct in id_img_list for img_path in dct["images"]]
id_rep_list = [rep for dct in id_img_list for rep in dct[_FIELD_NAME]]
id_reps = np.array(id_rep_list)
for cl_dct in clusters_12:
    print("============ ============ ============")
    print(f"Cluster {cl_dct['cluster_id']} has {len(cl_dct['ids'])} items")
    print(cl_dct["labels_gender"])
    print(cl_dct["labels_model"])
    print(cl_dct["labels_ethnicity"])
    # score each item by dot product with the cluster centroid
    exemplar_scores = sorted([(eid, np.dot(id_reps[eid], cl_dct["centroid"])) for eid in cl_dct["ids"]], key=lambda x: x[1], reverse=True)
    # print(exemplar_scores)
    print("============ most representative")
    display(Image.open(id_img_path_list[exemplar_scores[0][0]]).convert('RGB'))
    print("============ least representative")
    display(Image.open(id_img_path_list[exemplar_scores[-1][0]]).convert('RGB'))
============ ============ ============
Cluster 0 has 173 items
[('man', 97), ('person', 76)]
[('SD_2', 69), ('DallE', 58), ('SD_14', 46)]
[('White', 52), ('', 52), ('Caucasian', 48), ('Latino', 7), ('Latinx', 6), ('Hispanic', 5), ('Multiracial', 1), ('Black', 1), ('Southeast Asian', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 1 has 151 items
[('man', 84), ('person', 66), ('woman', 1)]
[('SD_2', 60), ('DallE', 50), ('SD_14', 41)]
[('African-American', 52), ('Black', 52), ('Multiracial', 35), ('White', 3), ('Pacific Islander', 2), ('Hispanic', 2), ('Latinx', 2), ('Caucasian', 1), ('', 1), ('Latino', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 2 has 141 items
[('woman', 109), ('person', 32)]
[('SD_14', 70), ('SD_2', 60), ('DallE', 11)]
[('Multiracial', 36), ('African-American', 32), ('Black', 28), ('Latinx', 9), ('', 8), ('White', 6), ('Pacific Islander', 6), ('Latino', 6), ('Hispanic', 5), ('Caucasian', 5)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 3 has 131 items
[('woman', 111), ('person', 20)]
[('SD_2', 46), ('SD_14', 44), ('DallE', 41)]
[('Latinx', 27), ('White', 27), ('Caucasian', 24), ('', 24), ('Latino', 15), ('Hispanic', 5), ('Pacific Islander', 3), ('Southeast Asian', 2), ('South Asian', 2), ('East Asian', 1), ('First Nations', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 4 has 129 items
[('man', 75), ('person', 54)]
[('DallE', 69), ('SD_2', 35), ('SD_14', 25)]
[('Latino', 25), ('Pacific Islander', 23), ('Hispanic', 23), ('Latinx', 15), ('Caucasian', 9), ('First Nations', 9), ('Multiracial', 6), ('Native American', 6), ('Indigenous American', 3), ('American Indian', 3), ('', 3), ('Southeast Asian', 2), ('White', 2)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 5 has 126 items
[('woman', 56), ('man', 35), ('person', 35)]
[('SD_2', 97), ('SD_14', 26), ('DallE', 3)]
[('Indigenous American', 40), ('First Nations', 38), ('American Indian', 24), ('Native American', 23), ('Caucasian', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 6 has 121 items
[('person', 48), ('man', 48), ('woman', 25)]
[('SD_14', 93), ('SD_2', 22), ('DallE', 6)]
[('Native American', 40), ('American Indian', 40), ('First Nations', 23), ('Indigenous American', 18)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 7 has 115 items
[('woman', 109), ('person', 6)]
[('DallE', 89), ('SD_14', 14), ('SD_2', 12)]
[('South Asian', 30), ('Hispanic', 10), ('American Indian', 10), ('Latinx', 9), ('Pacific Islander', 9), ('Latino', 9), ('Black', 8), ('Native American', 8), ('Indigenous American', 8), ('African-American', 6), ('Multiracial', 5), ('First Nations', 2), ('Southeast Asian', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 8 has 112 items
[('woman', 89), ('person', 23)]
[('SD_14', 47), ('DallE', 36), ('SD_2', 29)]
[('East Asian', 40), ('Southeast Asian', 28), ('Pacific Islander', 21), ('First Nations', 7), ('Indigenous American', 6), ('Latino', 4), ('Native American', 2), ('', 2), ('Caucasian', 1), ('Latinx', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 9 has 106 items
[('person', 54), ('man', 52)]
[('DallE', 94), ('SD_2', 8), ('SD_14', 4)]
[('Southeast Asian', 16), ('Indigenous American', 15), ('American Indian', 13), ('Pacific Islander', 11), ('Native American', 11), ('Latino', 10), ('First Nations', 10), ('Latinx', 6), ('Multiracial', 5), ('Hispanic', 4), ('East Asian', 3), ('Caucasian', 1), ('Black', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 10 has 100 items
[('man', 57), ('person', 43)]
[('SD_14', 43), ('SD_2', 39), ('DallE', 18)]
[('East Asian', 46), ('Southeast Asian', 40), ('Pacific Islander', 13), ('Latinx', 1)]
============ most representative: [image]
============ least representative: [image]
============ ============ ============
Cluster 11 has 95 items
[('man', 52), ('person', 43)]
[('DallE', 35), ('SD_2', 33), ('SD_14', 27)]
[('South Asian', 58), ('Latinx', 14), ('Latino', 13), ('Hispanic', 6), ('Multiracial', 2), ('Pacific Islander', 2)]
============ most representative: [image]
============ least representative: [image]
pre_pandas_clusters = []
for dct in clusters_12:
    line_dct = {"total": len(dct["ids"])}
    gdr_dct = dict(dct["labels_gender"])
    for k in ["woman", "man", "person"]:
        line_dct[k] = gdr_dct.get(k, 0)
    eth_dct = dict(dct["labels_ethnicity"])
    for k in ethnicities_ordered:
        line_dct[k] = eth_dct.get(k, 0)
    mod_dct = dict(dct["labels_model"])
    for k in ["DallE", "SD_2", "SD_14"]:
        line_dct[k] = mod_dct.get(k, 0)
    pre_pandas_clusters += [line_dct]
print("#### Visualizing cluster compositions")
clusters_df = pd.DataFrame.from_dict(
pre_pandas_clusters,
)
clusters_df.style.background_gradient(axis=None, vmin=0, vmax=100, cmap="YlGnBu")
#### Visualizing cluster compositions
 | total | woman | man | person | (unspecified) | Caucasian | White | Multiracial | Black | African-American | Asian | South Asian | Southeast Asian | East Asian | Pacific Islander | Latino | Latinx | Hispanic | First Nations | Native American | American Indian | Indigenous American | DallE | SD_2 | SD_14
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 173 | 0 | 97 | 76 | 52 | 48 | 52 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 | 6 | 5 | 0 | 0 | 0 | 0 | 58 | 69 | 46 |
1 | 151 | 1 | 84 | 66 | 1 | 1 | 3 | 35 | 52 | 52 | 0 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 0 | 0 | 0 | 0 | 50 | 60 | 41 |
2 | 141 | 109 | 0 | 32 | 8 | 5 | 6 | 36 | 28 | 32 | 0 | 0 | 0 | 0 | 6 | 6 | 9 | 5 | 0 | 0 | 0 | 0 | 11 | 60 | 70 |
3 | 131 | 111 | 0 | 20 | 24 | 24 | 27 | 0 | 0 | 0 | 0 | 2 | 2 | 1 | 3 | 15 | 27 | 5 | 1 | 0 | 0 | 0 | 41 | 46 | 44 |
4 | 129 | 0 | 75 | 54 | 3 | 9 | 2 | 6 | 0 | 0 | 0 | 0 | 2 | 0 | 23 | 25 | 15 | 23 | 9 | 6 | 3 | 3 | 69 | 35 | 25 |
5 | 126 | 56 | 35 | 35 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 23 | 24 | 40 | 3 | 97 | 26 |
6 | 121 | 25 | 48 | 48 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 40 | 40 | 18 | 6 | 22 | 93 |
7 | 115 | 109 | 0 | 6 | 0 | 0 | 0 | 5 | 8 | 6 | 0 | 30 | 1 | 0 | 9 | 9 | 9 | 10 | 2 | 8 | 10 | 8 | 89 | 12 | 14 |
8 | 112 | 89 | 0 | 23 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 40 | 21 | 4 | 1 | 0 | 7 | 2 | 0 | 6 | 36 | 29 | 47 |
9 | 106 | 0 | 52 | 54 | 0 | 1 | 0 | 5 | 1 | 0 | 0 | 0 | 16 | 3 | 11 | 10 | 6 | 4 | 10 | 11 | 13 | 15 | 94 | 8 | 4 |
10 | 100 | 0 | 57 | 43 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 46 | 13 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 18 | 39 | 43 |
11 | 95 | 0 | 52 | 43 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 58 | 0 | 0 | 2 | 13 | 14 | 6 | 0 | 0 | 0 | 0 | 35 | 33 | 27 |
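The cell above flattens each cluster's label counts into one row per cluster; the same pattern recurs below for the filtered 12-, 24-, and 48-cluster views, so a small helper could factor it out (hypothetical, not part of diffusion_bias_utils):

# Hypothetical helper for the repeated "cluster dict -> flat count row" step.
def cluster_row(dct, with_models=True):
    row = {"total": len(dct["ids"])}
    gdr_dct = dict(dct["labels_gender"])
    row.update({k: gdr_dct.get(k, 0) for k in ["woman", "man", "person"]})
    eth_dct = dict(dct["labels_ethnicity"])
    row.update({k: eth_dct.get(k, 0) for k in ethnicities_ordered})
    if with_models:
        mod_dct = dict(dct["labels_model"])
        row.update({k: mod_dct.get(k, 0) for k in ["DallE", "SD_2", "SD_14"]})
    return row

# e.g. pd.DataFrame.from_dict([cluster_row(dct) for dct in clusters_12])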
ethnicities_predicted_ordered = [
"caucasian",
"white",
"black",
"asian",
"hispanic",
"native american",
]
pre_pandas = [
{
"gender_generated": dct["gender"],
"ethnicity_generated": dct["ethnicity"],
"gender_predicted": gdr,
"ethnicity_predicted": eth,
"appearance_predicted": app,
"profession_predicted": pro,
"model": dct["model"],
}
for dct in id_img_list
for gdr, eth, app, pro in zip(
dct["blip_question_gender_answers"],
dct["blip_question_ethnicity_answers"],
dct["blip_question_appearance_answers"],
dct["blip_question_profession_answers"],
)
]
id_df = pd.DataFrame.from_dict(pre_pandas)
confusion_matrix = pd.crosstab(
id_df['ethnicity_generated'],
id_df['ethnicity_predicted'],
rownames=['Generated'],
colnames=['Predicted'],
)
print("#### Visualizing ethnicity generation calibration across models")
confusion_matrix.reindex(
index=[eth for eth in ethnicities_ordered if eth != "Asian"],
columns=ethnicities_predicted_ordered,
method=None
).style.background_gradient(
axis=None,
vmin=0,
vmax=90,
cmap="YlGnBu"
).format(precision=0)
#### Visualizing ethnicity generation calibration across models
Generated \ Predicted | caucasian | white | black | asian | hispanic | native american
---|---|---|---|---|---|---
(unspecified) | 48 | 28 | 10 | 4 | 0 | 0
Caucasian | 58 | 28 | 1 | 3 | 0 | 0 |
White | 57 | 26 | 6 | 1 | 0 | 0 |
Multiracial | 2 | 2 | 80 | 2 | 4 | 0 |
Black | 0 | 0 | 90 | 0 | 0 | 0 |
African-American | 0 | 0 | 90 | 0 | 0 | 0 |
South Asian | 0 | 0 | 46 | 39 | 5 | 0 |
Southeast Asian | 0 | 0 | 2 | 86 | 2 | 0 |
East Asian | 0 | 0 | 0 | 90 | 0 | 0 |
Pacific Islander | 0 | 0 | 13 | 58 | 19 | 0 |
Latino | 2 | 17 | 2 | 21 | 48 | 0 |
Latinx | 6 | 17 | 9 | 24 | 34 | 0 |
Hispanic | 6 | 6 | 5 | 10 | 33 | 0 |
First Nations | 1 | 5 | 4 | 33 | 15 | 32 |
Native American | 0 | 4 | 3 | 17 | 13 | 53 |
American Indian | 0 | 6 | 6 | 8 | 19 | 51 |
Indigenous American | 0 | 6 | 2 | 21 | 31 | 30 |
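Since the row totals differ across generated-ethnicity terms, a row-normalized variant can make the calibration comparison cleaner; pd.crosstab supports this directly (a possible follow-up to the cell above):

# Row-normalized confusion matrix: each "Generated" row sums to 1, so the
# comparison no longer depends on how many images each row has.
confusion_matrix_norm = pd.crosstab(
    id_df['ethnicity_generated'],
    id_df['ethnicity_predicted'],
    rownames=['Generated'],
    colnames=['Predicted'],
    normalize='index',
)
confusion_matrix_norm.style.background_gradient(
    axis=None, vmin=0, vmax=1, cmap="YlGnBu"
).format(precision=2)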
model_name = "SD_14"
confusion_matrix = pd.crosstab(
id_df[id_df["model"] == model_name]['ethnicity_generated'],
id_df[id_df["model"] == model_name]['ethnicity_predicted'],
rownames=['Generated'],
colnames=['Predicted'],
)
print(f"#### Visualizing ethnicity generation calibration for: {model_name}")
confusion_matrix.reindex(
index=[eth for eth in ethnicities_ordered if eth != "Asian"],
columns=ethnicities_predicted_ordered,
method=None
).style.background_gradient(
axis=None,
vmin=0,
vmax=90,
cmap="YlGnBu"
).format(precision=0)
#### Visualizing ethnicity generation calibration for: SD_14
Generated \ Predicted | caucasian | white | black | asian | hispanic | native american
---|---|---|---|---|---|---
(unspecified) | 12 | 12 | 4 | 2 | 0 | 0
Caucasian | 13 | 16 | 1 | 0 | 0 | 0 |
White | 11 | 14 | 5 | 0 | 0 | 0 |
Multiracial | 0 | 2 | 27 | 0 | 1 | 0 |
Black | 0 | 0 | 30 | 0 | 0 | 0 |
African-American | 0 | 0 | 30 | 0 | 0 | 0 |
South Asian | 0 | 0 | 10 | 20 | 0 | 0 |
Southeast Asian | 0 | 0 | 0 | 30 | 0 | 0 |
East Asian | 0 | 0 | 0 | 30 | 0 | 0 |
Pacific Islander | 0 | 0 | 0 | 28 | 2 | 0 |
Latino | 0 | 12 | 0 | 1 | 17 | 0 |
Latinx | 3 | 13 | 2 | 3 | 9 | 0 |
Hispanic | nan | nan | nan | nan | nan | nan |
First Nations | 0 | 0 | 1 | 4 | 1 | 24 |
Native American | 0 | 0 | 1 | 0 | 0 | 29 |
American Indian | 0 | 0 | 0 | 0 | 0 | 30 |
Indigenous American | 0 | 2 | 1 | 0 | 5 | 22 |
model_name = "SD_2"
confusion_matrix = pd.crosstab(
id_df[id_df["model"] == model_name]['ethnicity_generated'],
id_df[id_df["model"] == model_name]['ethnicity_predicted'],
rownames=['Generated'],
colnames=['Predicted'],
)
print(f"#### Visualizing ethnicity generation calibration for: {model_name}")
confusion_matrix.reindex(
index=[eth for eth in ethnicities_ordered if eth != "Asian"],
columns=ethnicities_predicted_ordered,
method=None
).style.background_gradient(
axis=None,
vmin=0,
vmax=90,
cmap="YlGnBu"
).format(precision=0)
#### Visualizing ethnicity generation calibration for: SD_2
Generated \ Predicted | caucasian | white | black | asian | hispanic | native american
---|---|---|---|---|---|---
(unspecified) | 8 | 16 | 6 | 0 | 0 | 0
Caucasian | 18 | 12 | 0 | 0 | 0 | 0 |
White | 18 | 11 | 1 | 0 | 0 | 0 |
Multiracial | 0 | 0 | 29 | 1 | 0 | 0 |
Black | 0 | 0 | 30 | 0 | 0 | 0 |
African-American | 0 | 0 | 30 | 0 | 0 | 0 |
South Asian | 0 | 0 | 27 | 3 | 0 | 0 |
Southeast Asian | 0 | 0 | 1 | 29 | 0 | 0 |
East Asian | 0 | 0 | 0 | 30 | 0 | 0 |
Pacific Islander | 0 | 0 | 7 | 22 | 1 | 0 |
Latino | 1 | 5 | 1 | 3 | 20 | 0 |
Latinx | 3 | 4 | 5 | 5 | 13 | 0 |
Hispanic | 5 | 5 | 4 | 0 | 16 | 0 |
First Nations | 0 | 3 | 3 | 12 | 5 | 7 |
Native American | 0 | 3 | 1 | 3 | 3 | 20 |
American Indian | 0 | 6 | 2 | 2 | 5 | 15 |
Indigenous American | 0 | 4 | 1 | 7 | 12 | 6 |
model_name = "DallE"
confusion_matrix = pd.crosstab(
id_df[id_df["model"] == model_name]['ethnicity_generated'],
id_df[id_df["model"] == model_name]['ethnicity_predicted'],
rownames=['Generated'],
colnames=['Predicted'],
)
print(f"#### Visualizing ethnicity generation calibration for: {model_name}")
confusion_matrix.reindex(
index=[eth for eth in ethnicities_ordered if eth != "Asian"],
columns=ethnicities_predicted_ordered,
method=None
).style.background_gradient(
axis=None,
vmin=0,
vmax=90,
cmap="YlGnBu"
).format(precision=0)
#### Visualizing ethnicity generation calibration for: DallE
Generated \ Predicted | caucasian | white | black | asian | hispanic | native american
---|---|---|---|---|---|---
(unspecified) | 28 | 0 | 0 | 2 | 0 | 0
Caucasian | 27 | 0 | 0 | 3 | 0 | 0 |
White | 28 | 1 | 0 | 1 | 0 | 0 |
Multiracial | 2 | 0 | 24 | 1 | 3 | 0 |
Black | 0 | 0 | 30 | 0 | 0 | 0 |
African-American | 0 | 0 | 30 | 0 | 0 | 0 |
South Asian | 0 | 0 | 9 | 16 | 5 | 0 |
Southeast Asian | 0 | 0 | 1 | 27 | 2 | 0 |
East Asian | 0 | 0 | 0 | 30 | 0 | 0 |
Pacific Islander | 0 | 0 | 6 | 8 | 16 | 0 |
Latino | 1 | 0 | 1 | 17 | 11 | 0 |
Latinx | 0 | 0 | 2 | 16 | 12 | 0 |
Hispanic | 1 | 1 | 1 | 10 | 17 | 0 |
First Nations | 1 | 2 | 0 | 17 | 9 | 1 |
Native American | 0 | 1 | 1 | 14 | 10 | 4 |
American Indian | 0 | 0 | 4 | 6 | 14 | 6 |
Indigenous American | 0 | 0 | 0 | 14 | 14 | 2 |
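The three per-model cells above differ only in model_name; the same comparison could be written once as a loop (an equivalent sketch of those cells):

# Loop version of the three per-model confusion-matrix cells.
for model_name in ["SD_14", "SD_2", "DallE"]:
    sub_df = id_df[id_df["model"] == model_name]
    cm = pd.crosstab(
        sub_df['ethnicity_generated'],
        sub_df['ethnicity_predicted'],
        rownames=['Generated'],
        colnames=['Predicted'],
    )
    print(f"#### Visualizing ethnicity generation calibration for: {model_name}")
    display(
        cm.reindex(
            index=[eth for eth in ethnicities_ordered if eth != "Asian"],
            columns=ethnicities_predicted_ordered,
        ).style.background_gradient(axis=None, vmin=0, vmax=90, cmap="YlGnBu").format(precision=0)
    )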
with open("identity_images_preds_clusters_all_BLIP.json") as f:
    id_list_with_clusters = json.load(f)
with open("profession_images_preds_clusters_all_BLIP.json") as f:
    prof_list_with_clusters = json.load(f)
adjectives_to_cluster_12 = {}
for dct in prof_list_with_clusters:
    if dct["model"] == "DallE":
        # count, per prompt adjective, how many DallE images fall in each of the 12 clusters
        adj = dct["prompt_adjective"]
        cl_id = dct["cluster_id_12"]
        adjectives_to_cluster_12[adj] = adjectives_to_cluster_12.get(
            adj,
            dict([("adjective", adj)] + [(i, 0) for i in range(12)])
        )
        adjectives_to_cluster_12[adj][cl_id] += 1
pre_pandas_cl_adj_12 = sorted(adjectives_to_cluster_12.values(), key=lambda x:x["adjective"])
cl_adj_12_df = pd.DataFrame.from_dict(pre_pandas_cl_adj_12)
cl_adj_12_df.style.background_gradient(
axis=None,
vmin=0,
vmax=500,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11
---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | ambitious | 1010 | 27 | 4 | 315 | 92 | 0 | 0 | 4 | 5 | 36 | 3 | 4 |
1 | assertive | 1092 | 4 | 2 | 335 | 22 | 0 | 0 | 2 | 4 | 38 | 1 | 0 |
2 | committed | 1118 | 6 | 0 | 237 | 92 | 0 | 0 | 1 | 9 | 33 | 2 | 2 |
3 | compassionate | 1008 | 8 | 1 | 354 | 112 | 0 | 0 | 4 | 3 | 9 | 1 | 0 |
4 | confident | 1063 | 19 | 4 | 225 | 144 | 0 | 0 | 9 | 2 | 27 | 1 | 6 |
5 | considerate | 1022 | 3 | 1 | 330 | 115 | 0 | 0 | 5 | 2 | 18 | 2 | 2 |
6 | decisive | 1199 | 0 | 0 | 207 | 47 | 0 | 0 | 3 | 2 | 38 | 2 | 2 |
7 | determined | 1112 | 21 | 6 | 260 | 23 | 0 | 0 | 8 | 5 | 62 | 1 | 2 |
8 | emotional | 1037 | 0 | 0 | 313 | 92 | 0 | 0 | 9 | 1 | 48 | 0 | 0 |
9 | gentle | 1043 | 4 | 1 | 338 | 88 | 0 | 0 | 1 | 3 | 20 | 1 | 1 |
10 | honest | 1090 | 6 | 1 | 258 | 106 | 0 | 0 | 3 | 4 | 29 | 1 | 2 |
11 | intellectual | 1328 | 1 | 0 | 134 | 23 | 0 | 0 | 1 | 0 | 11 | 1 | 1 |
12 | modest | 1119 | 1 | 0 | 240 | 84 | 0 | 0 | 2 | 9 | 41 | 4 | 0 |
13 | no_adjective | 966 | 8 | 2 | 353 | 104 | 0 | 0 | 4 | 13 | 40 | 7 | 3 |
14 | outspoken | 1207 | 9 | 0 | 178 | 39 | 0 | 0 | 8 | 6 | 52 | 1 | 0 |
15 | pleasant | 918 | 1 | 0 | 403 | 163 | 0 | 0 | 3 | 0 | 10 | 2 | 0 |
16 | self-confident | 1027 | 2 | 0 | 341 | 104 | 0 | 0 | 4 | 1 | 20 | 1 | 0 |
17 | sensitive | 1057 | 0 | 0 | 359 | 33 | 0 | 0 | 8 | 4 | 38 | 1 | 0 |
18 | stubborn | 1201 | 0 | 1 | 195 | 1 | 0 | 0 | 3 | 24 | 75 | 0 | 0 |
19 | supportive | 904 | 4 | 4 | 464 | 112 | 0 | 0 | 0 | 2 | 8 | 1 | 1 |
20 | unreasonable | 1249 | 1 | 0 | 154 | 8 | 0 | 0 | 2 | 11 | 74 | 1 | 0 |
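The dictionary accumulation above pre-fills all 12 cluster columns by hand; pd.crosstab can build the same adjective x cluster table more directly (an equivalent sketch):

# Equivalent table via pandas: one row per image, then crosstab, then reindex
# so clusters with no DallE images still show up as all-zero columns.
dalle_df = pd.DataFrame([
    {"adjective": dct["prompt_adjective"], "cluster": dct["cluster_id_12"]}
    for dct in prof_list_with_clusters
    if dct["model"] == "DallE"
])
cl_adj_12_ct = pd.crosstab(dalle_df["adjective"], dalle_df["cluster"]).reindex(
    columns=range(12), fill_value=0
)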
# Only show clusters with more than 150 assigned examples (note: the [1:] row slice below also drops the table's first row, "ambitious" here)
s_cols = [True] + list(cl_adj_12_df.sum(axis=0)[1:] > 150)
cl_adj_12_df[1:].loc[:, s_cols].style.background_gradient(
axis=None,
vmin=0,
vmax=1000,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 3 | 4 | 9
---|---|---|---|---|---
1 | assertive | 1092 | 335 | 22 | 38 |
2 | committed | 1118 | 237 | 92 | 33 |
3 | compassionate | 1008 | 354 | 112 | 9 |
4 | confident | 1063 | 225 | 144 | 27 |
5 | considerate | 1022 | 330 | 115 | 18 |
6 | decisive | 1199 | 207 | 47 | 38 |
7 | determined | 1112 | 260 | 23 | 62 |
8 | emotional | 1037 | 313 | 92 | 48 |
9 | gentle | 1043 | 338 | 88 | 20 |
10 | honest | 1090 | 258 | 106 | 29 |
11 | intellectual | 1328 | 134 | 23 | 11 |
12 | modest | 1119 | 240 | 84 | 41 |
13 | no_adjective | 966 | 353 | 104 | 40 |
14 | outspoken | 1207 | 178 | 39 | 52 |
15 | pleasant | 918 | 403 | 163 | 10 |
16 | self-confident | 1027 | 341 | 104 | 20 |
17 | sensitive | 1057 | 359 | 33 | 38 |
18 | stubborn | 1201 | 195 | 1 | 75 |
19 | supportive | 904 | 464 | 112 | 8 |
20 | unreasonable | 1249 | 154 | 8 | 74 |
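The s_cols mask above prepends True so that the adjective column survives the >150 filter; a slightly more explicit equivalent selection:

# More explicit equivalent of the s_cols boolean mask: name the busy clusters,
# then select them alongside the adjective column.
cluster_counts = cl_adj_12_df.drop(columns=["adjective"]).sum(axis=0)
busy_clusters = [c for c in cluster_counts.index if cluster_counts[c] > 150]
cl_adj_12_df.loc[1:, ["adjective"] + busy_clusters]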
# Composition of the clusters with more than 150 assigned examples
meaningful_clusters = [i for i, b in enumerate(list(cl_adj_12_df.sum(axis=0)[1:] > 150)) if b]
pre_pandas_clusters = []
for cl_id in meaningful_clusters:
    dct = clusters_12[cl_id]
    line_dct = {
        "cluster_id": cl_id,
        # "total": len(dct["ids"]),
    }
    gdr_dct = dict(dct["labels_gender"])
    for k in ["woman", "man", "person"]:
        line_dct[k] = gdr_dct.get(k, 0)
    eth_dct = dict(dct["labels_ethnicity"])
    for k in ethnicities_ordered:
        line_dct[k] = eth_dct.get(k, 0)
    mod_dct = dict(dct["labels_model"])
    # for k in ["DallE", "SD_2", "SD_14"]:
    #     line_dct[k] = mod_dct.get(k, 0)
    pre_pandas_clusters += [line_dct]
print("#### Visualizing cluster compositions")
clusters_df = pd.DataFrame.from_dict(
pre_pandas_clusters,
)
clusters_df.style.background_gradient(axis=None, vmin=0, vmax=50, cmap="YlGnBu")
#### Visualizing cluster compositions
 | cluster_id | woman | man | person | (unspecified) | Caucasian | White | Multiracial | Black | African-American | Asian | South Asian | Southeast Asian | East Asian | Pacific Islander | Latino | Latinx | Hispanic | First Nations | Native American | American Indian | Indigenous American
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 0 | 0 | 97 | 76 | 52 | 48 | 52 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 | 6 | 5 | 0 | 0 | 0 | 0 |
1 | 3 | 111 | 0 | 20 | 24 | 24 | 27 | 0 | 0 | 0 | 0 | 2 | 2 | 1 | 3 | 15 | 27 | 5 | 1 | 0 | 0 | 0 |
2 | 4 | 0 | 75 | 54 | 3 | 9 | 2 | 6 | 0 | 0 | 0 | 0 | 2 | 0 | 23 | 25 | 15 | 23 | 9 | 6 | 3 | 3 |
3 | 9 | 0 | 52 | 54 | 0 | 1 | 0 | 5 | 1 | 0 | 0 | 0 | 16 | 3 | 11 | 10 | 6 | 4 | 10 | 11 | 13 | 15 |
# Example use: look at cluster assignment per adjective for DallE
adjectives_to_cluster_24 = {}
for dct in prof_list_with_clusters:
    if dct["model"] == "DallE":
        adj = dct["prompt_adjective"]
        cl_id = dct["cluster_id_24"]
        adjectives_to_cluster_24[adj] = adjectives_to_cluster_24.get(
            adj,
            dict([("adjective", adj)] + [(i, 0) for i in range(24)])
        )
        adjectives_to_cluster_24[adj][cl_id] += 1
pre_pandas_cl_adj_24 = sorted(adjectives_to_cluster_24.values(), key=lambda x:x["adjective"])
cl_adj_24_df = pd.DataFrame.from_dict(pre_pandas_cl_adj_24)
cl_adj_24_df.style.background_gradient(
axis=None,
vmin=0,
vmax=500,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | ambitious | 2 | 445 | 3 | 1 | 0 | 0 | 247 | 0 | 9 | 524 | 71 | 2 | 0 | 38 | 6 | 4 | 6 | 3 | 14 | 0 | 15 | 9 | 0 | 101 |
1 | assertive | 2 | 247 | 0 | 2 | 0 | 0 | 314 | 0 | 0 | 834 | 24 | 10 | 0 | 5 | 3 | 1 | 1 | 1 | 3 | 0 | 15 | 2 | 0 | 36 |
2 | committed | 1 | 456 | 4 | 0 | 0 | 0 | 181 | 0 | 5 | 621 | 53 | 5 | 0 | 17 | 5 | 0 | 12 | 2 | 4 | 0 | 19 | 1 | 0 | 114 |
3 | compassionate | 6 | 698 | 0 | 0 | 0 | 0 | 240 | 0 | 2 | 224 | 106 | 1 | 1 | 23 | 3 | 5 | 4 | 1 | 9 | 2 | 20 | 0 | 0 | 155 |
4 | confident | 10 | 667 | 4 | 4 | 0 | 0 | 145 | 0 | 9 | 302 | 73 | 7 | 0 | 47 | 3 | 5 | 4 | 2 | 8 | 0 | 23 | 3 | 0 | 184 |
5 | considerate | 4 | 539 | 4 | 0 | 0 | 0 | 254 | 0 | 2 | 413 | 75 | 6 | 0 | 44 | 0 | 3 | 2 | 3 | 0 | 0 | 24 | 0 | 0 | 127 |
6 | decisive | 1 | 267 | 0 | 0 | 0 | 0 | 200 | 0 | 0 | 926 | 11 | 7 | 0 | 22 | 3 | 1 | 1 | 2 | 0 | 1 | 29 | 1 | 0 | 28 |
7 | determined | 7 | 246 | 2 | 5 | 0 | 0 | 253 | 0 | 0 | 863 | 13 | 22 | 0 | 12 | 2 | 1 | 1 | 4 | 16 | 0 | 34 | 7 | 0 | 12 |
8 | emotional | 5 | 294 | 0 | 0 | 0 | 0 | 270 | 0 | 1 | 735 | 50 | 0 | 1 | 22 | 7 | 0 | 0 | 0 | 0 | 0 | 30 | 0 | 0 | 85 |
9 | gentle | 0 | 371 | 1 | 0 | 0 | 0 | 298 | 1 | 0 | 600 | 39 | 2 | 0 | 34 | 4 | 7 | 2 | 3 | 4 | 1 | 37 | 0 | 0 | 96 |
10 | honest | 6 | 406 | 3 | 0 | 0 | 0 | 214 | 0 | 0 | 634 | 35 | 6 | 0 | 43 | 1 | 6 | 5 | 1 | 6 | 4 | 48 | 1 | 0 | 81 |
11 | intellectual | 1 | 838 | 1 | 0 | 0 | 0 | 131 | 0 | 2 | 464 | 3 | 4 | 0 | 17 | 1 | 1 | 0 | 1 | 0 | 0 | 12 | 0 | 0 | 24 |
12 | modest | 2 | 305 | 1 | 0 | 0 | 0 | 239 | 0 | 0 | 799 | 5 | 5 | 0 | 44 | 1 | 1 | 7 | 6 | 1 | 1 | 41 | 0 | 0 | 42 |
13 | no_adjective | 6 | 387 | 3 | 0 | 0 | 0 | 297 | 0 | 0 | 528 | 52 | 10 | 1 | 46 | 3 | 2 | 17 | 11 | 7 | 1 | 33 | 2 | 0 | 94 |
14 | outspoken | 5 | 335 | 0 | 0 | 0 | 0 | 168 | 0 | 7 | 854 | 15 | 6 | 0 | 9 | 6 | 1 | 3 | 1 | 8 | 0 | 47 | 1 | 0 | 34 |
15 | pleasant | 4 | 557 | 0 | 0 | 0 | 0 | 298 | 0 | 1 | 266 | 101 | 1 | 0 | 50 | 0 | 3 | 0 | 3 | 1 | 0 | 12 | 0 | 0 | 203 |
16 | self-confident | 3 | 499 | 1 | 0 | 0 | 0 | 275 | 0 | 0 | 454 | 66 | 6 | 0 | 38 | 4 | 1 | 2 | 1 | 2 | 0 | 22 | 0 | 0 | 126 |
17 | sensitive | 4 | 303 | 1 | 0 | 0 | 0 | 339 | 1 | 0 | 726 | 31 | 5 | 1 | 13 | 2 | 1 | 0 | 1 | 0 | 0 | 35 | 0 | 0 | 37 |
18 | stubborn | 0 | 76 | 0 | 0 | 0 | 0 | 223 | 2 | 0 | 1152 | 0 | 8 | 0 | 0 | 4 | 1 | 0 | 0 | 0 | 4 | 30 | 0 | 0 | 0 |
19 | supportive | 1 | 609 | 1 | 3 | 0 | 0 | 303 | 0 | 2 | 184 | 144 | 0 | 0 | 40 | 1 | 12 | 3 | 1 | 4 | 0 | 9 | 1 | 0 | 182 |
20 | unreasonable | 1 | 134 | 0 | 0 | 0 | 0 | 166 | 0 | 1 | 1151 | 1 | 3 | 0 | 2 | 11 | 0 | 1 | 2 | 0 | 0 | 24 | 0 | 0 | 3 |
# Only show clusters with more than 150 assigned examples
s_cols = [True] + list(cl_adj_24_df.sum(axis=0)[1:] > 150)
cl_adj_24_df[1:].loc[:, s_cols].style.background_gradient(
axis=None,
vmin=0,
vmax=1000,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 1 | 6 | 9 | 10 | 13 | 20 | 23
---|---|---|---|---|---|---|---|---
1 | assertive | 247 | 314 | 834 | 24 | 5 | 15 | 36 |
2 | committed | 456 | 181 | 621 | 53 | 17 | 19 | 114 |
3 | compassionate | 698 | 240 | 224 | 106 | 23 | 20 | 155 |
4 | confident | 667 | 145 | 302 | 73 | 47 | 23 | 184 |
5 | considerate | 539 | 254 | 413 | 75 | 44 | 24 | 127 |
6 | decisive | 267 | 200 | 926 | 11 | 22 | 29 | 28 |
7 | determined | 246 | 253 | 863 | 13 | 12 | 34 | 12 |
8 | emotional | 294 | 270 | 735 | 50 | 22 | 30 | 85 |
9 | gentle | 371 | 298 | 600 | 39 | 34 | 37 | 96 |
10 | honest | 406 | 214 | 634 | 35 | 43 | 48 | 81 |
11 | intellectual | 838 | 131 | 464 | 3 | 17 | 12 | 24 |
12 | modest | 305 | 239 | 799 | 5 | 44 | 41 | 42 |
13 | no_adjective | 387 | 297 | 528 | 52 | 46 | 33 | 94 |
14 | outspoken | 335 | 168 | 854 | 15 | 9 | 47 | 34 |
15 | pleasant | 557 | 298 | 266 | 101 | 50 | 12 | 203 |
16 | self-confident | 499 | 275 | 454 | 66 | 38 | 22 | 126 |
17 | sensitive | 303 | 339 | 726 | 31 | 13 | 35 | 37 |
18 | stubborn | 76 | 223 | 1152 | 0 | 0 | 30 | 0 |
19 | supportive | 609 | 303 | 184 | 144 | 40 | 9 | 182 |
20 | unreasonable | 134 | 166 | 1151 | 1 | 2 | 24 | 3 |
# Composition of the clusters with more than 150 assigned examples
meaningful_clusters = [i for i, b in enumerate(list(cl_adj_24_df.sum(axis=0)[1:] > 150)) if b]
pre_pandas_clusters = []
for cl_id in meaningful_clusters:
    dct = clusters_24[cl_id]
    line_dct = {
        "cluster_id": cl_id,
        # "total": len(dct["ids"]),
    }
    gdr_dct = dict(dct["labels_gender"])
    for k in ["woman", "man", "person"]:
        line_dct[k] = gdr_dct.get(k, 0)
    eth_dct = dict(dct["labels_ethnicity"])
    for k in ethnicities_ordered:
        line_dct[k] = eth_dct.get(k, 0)
    mod_dct = dict(dct["labels_model"])
    # for k in ["DallE", "SD_2", "SD_14"]:
    #     line_dct[k] = mod_dct.get(k, 0)
    pre_pandas_clusters += [line_dct]
print("#### Visualizing cluster compositions")
clusters_df = pd.DataFrame.from_dict(
pre_pandas_clusters,
)
clusters_df.style.background_gradient(axis=None, vmin=0, vmax=50, cmap="YlGnBu")
#### Visualizing cluster compositions
 | cluster_id | woman | man | person | (unspecified) | Caucasian | White | Multiracial | Black | African-American | Asian | South Asian | Southeast Asian | East Asian | Pacific Islander | Latino | Latinx | Hispanic | First Nations | Native American | American Indian | Indigenous American
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 1 | 0 | 68 | 45 | 32 | 26 | 34 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 | 6 | 5 | 0 | 0 | 0 | 0 |
1 | 6 | 64 | 0 | 8 | 15 | 16 | 21 | 0 | 0 | 0 | 0 | 2 | 2 | 1 | 2 | 4 | 8 | 0 | 1 | 0 | 0 | 0 |
2 | 9 | 0 | 29 | 31 | 20 | 22 | 18 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3 | 10 | 47 | 0 | 12 | 9 | 8 | 6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 11 | 19 | 5 | 0 | 0 | 0 | 0 |
4 | 13 | 0 | 27 | 26 | 0 | 0 | 0 | 4 | 0 | 0 | 0 | 0 | 2 | 0 | 16 | 11 | 9 | 11 | 0 | 0 | 0 | 0 |
5 | 20 | 0 | 32 | 11 | 3 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 11 | 1 | 0 | 8 | 6 | 3 | 3 |
6 | 23 | 0 | 16 | 17 | 0 | 9 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 5 | 12 | 1 | 0 | 0 | 0 |
adjectives_to_cluster_48 = {}
for dct in prof_list_with_clusters:
    if dct["model"] == "DallE":
        adj = dct["prompt_adjective"]
        cl_id = dct["cluster_id_48"]
        adjectives_to_cluster_48[adj] = adjectives_to_cluster_48.get(
            adj,
            dict([("adjective", adj)] + [(i, 0) for i in range(48)])
        )
        adjectives_to_cluster_48[adj][cl_id] += 1
pre_pandas_cl_adj_48 = sorted(adjectives_to_cluster_48.values(), key=lambda x:x["adjective"])
cl_adj_48_df = pd.DataFrame.from_dict(pre_pandas_cl_adj_48)
cl_adj_48_df.style.background_gradient(
axis=None,
vmin=0,
vmax=1000,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | ambitious | 0 | 1 | 359 | 0 | 0 | 10 | 4 | 12 | 2 | 21 | 0 | 0 | 18 | 162 | 2 | 0 | 255 | 6 | 3 | 24 | 0 | 20 | 4 | 0 | 0 | 200 | 2 | 2 | 5 | 0 | 7 | 0 | 6 | 56 | 0 | 0 | 2 | 0 | 3 | 10 | 2 | 2 | 191 | 0 | 85 | 2 | 20 | 2 |
1 | assertive | 0 | 0 | 674 | 0 | 0 | 0 | 1 | 5 | 1 | 6 | 0 | 0 | 7 | 137 | 0 | 0 | 158 | 3 | 1 | 6 | 0 | 4 | 1 | 0 | 0 | 244 | 1 | 17 | 0 | 0 | 19 | 0 | 12 | 43 | 0 | 0 | 0 | 0 | 5 | 13 | 11 | 3 | 70 | 3 | 42 | 0 | 11 | 2 |
2 | committed | 0 | 0 | 475 | 0 | 0 | 3 | 0 | 6 | 2 | 16 | 0 | 0 | 19 | 200 | 8 | 0 | 214 | 1 | 1 | 19 | 0 | 2 | 1 | 0 | 0 | 136 | 0 | 4 | 5 | 0 | 38 | 0 | 0 | 41 | 0 | 1 | 1 | 0 | 3 | 15 | 6 | 0 | 153 | 1 | 92 | 2 | 34 | 1 |
3 | compassionate | 0 | 0 | 140 | 0 | 0 | 2 | 5 | 16 | 1 | 34 | 0 | 0 | 23 | 183 | 3 | 0 | 310 | 6 | 0 | 9 | 0 | 4 | 0 | 0 | 0 | 194 | 5 | 8 | 5 | 0 | 71 | 1 | 0 | 64 | 0 | 0 | 1 | 0 | 2 | 23 | 1 | 0 | 222 | 0 | 129 | 0 | 37 | 1 |
4 | confident | 0 | 0 | 182 | 0 | 0 | 8 | 5 | 17 | 2 | 30 | 0 | 0 | 26 | 248 | 3 | 0 | 296 | 1 | 3 | 33 | 0 | 15 | 2 | 0 | 0 | 118 | 2 | 1 | 8 | 0 | 28 | 0 | 1 | 36 | 0 | 0 | 2 | 0 | 2 | 18 | 1 | 7 | 277 | 0 | 100 | 0 | 22 | 6 |
5 | considerate | 0 | 0 | 296 | 0 | 0 | 0 | 1 | 9 | 3 | 25 | 1 | 0 | 29 | 123 | 1 | 0 | 308 | 0 | 0 | 19 | 0 | 10 | 0 | 0 | 0 | 191 | 6 | 7 | 4 | 0 | 36 | 0 | 1 | 55 | 0 | 0 | 2 | 0 | 1 | 17 | 4 | 2 | 221 | 1 | 82 | 0 | 43 | 2 |
6 | decisive | 0 | 0 | 748 | 0 | 0 | 0 | 1 | 4 | 2 | 2 | 0 | 0 | 16 | 113 | 0 | 0 | 188 | 0 | 4 | 13 | 0 | 14 | 0 | 0 | 0 | 162 | 2 | 9 | 0 | 0 | 29 | 1 | 6 | 26 | 0 | 0 | 0 | 0 | 5 | 15 | 3 | 0 | 74 | 2 | 52 | 0 | 4 | 5 |
7 | determined | 0 | 0 | 667 | 0 | 0 | 0 | 1 | 13 | 4 | 5 | 1 | 0 | 12 | 192 | 1 | 0 | 157 | 14 | 0 | 7 | 0 | 20 | 5 | 0 | 1 | 167 | 2 | 21 | 1 | 0 | 20 | 0 | 18 | 45 | 0 | 0 | 0 | 0 | 7 | 12 | 13 | 5 | 32 | 2 | 40 | 0 | 4 | 11 |
8 | emotional | 0 | 0 | 582 | 0 | 0 | 1 | 0 | 5 | 0 | 16 | 0 | 0 | 27 | 63 | 0 | 0 | 232 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 2 | 220 | 4 | 4 | 10 | 0 | 13 | 0 | 8 | 50 | 0 | 1 | 0 | 0 | 2 | 43 | 7 | 0 | 122 | 0 | 63 | 0 | 20 | 1 |
9 | gentle | 0 | 0 | 441 | 0 | 0 | 0 | 5 | 4 | 1 | 13 | 0 | 0 | 15 | 135 | 2 | 0 | 212 | 4 | 1 | 8 | 0 | 8 | 0 | 0 | 0 | 257 | 2 | 6 | 1 | 0 | 26 | 1 | 0 | 38 | 0 | 0 | 0 | 0 | 0 | 35 | 5 | 0 | 182 | 0 | 78 | 0 | 18 | 2 |
10 | honest | 0 | 0 | 508 | 0 | 0 | 2 | 5 | 13 | 0 | 11 | 0 | 0 | 38 | 121 | 4 | 0 | 190 | 3 | 2 | 8 | 0 | 13 | 0 | 0 | 0 | 186 | 5 | 1 | 2 | 0 | 71 | 1 | 0 | 33 | 0 | 0 | 0 | 0 | 2 | 33 | 4 | 0 | 169 | 2 | 57 | 1 | 12 | 3 |
11 | intellectual | 0 | 0 | 381 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 17 | 114 | 0 | 0 | 158 | 0 | 0 | 4 | 0 | 1 | 0 | 0 | 0 | 21 | 2 | 3 | 5 | 0 | 183 | 0 | 0 | 109 | 0 | 0 | 0 | 0 | 16 | 10 | 1 | 0 | 79 | 1 | 389 | 0 | 3 | 1 |
12 | modest | 0 | 0 | 620 | 0 | 0 | 1 | 0 | 3 | 4 | 1 | 0 | 0 | 19 | 66 | 5 | 0 | 205 | 0 | 1 | 10 | 0 | 10 | 0 | 0 | 0 | 197 | 0 | 3 | 1 | 0 | 51 | 0 | 0 | 41 | 0 | 0 | 0 | 0 | 4 | 36 | 8 | 1 | 165 | 1 | 43 | 2 | 1 | 1 |
13 | no_adjective | 0 | 0 | 373 | 0 | 0 | 1 | 2 | 4 | 9 | 19 | 0 | 0 | 27 | 85 | 13 | 0 | 262 | 7 | 3 | 13 | 0 | 11 | 2 | 0 | 0 | 242 | 4 | 1 | 3 | 0 | 38 | 0 | 0 | 63 | 0 | 0 | 0 | 0 | 7 | 22 | 5 | 3 | 172 | 3 | 76 | 1 | 26 | 3 |
14 | outspoken | 0 | 0 | 704 | 0 | 0 | 2 | 1 | 4 | 1 | 5 | 0 | 0 | 12 | 79 | 3 | 0 | 194 | 7 | 5 | 0 | 0 | 2 | 1 | 0 | 1 | 106 | 4 | 3 | 14 | 0 | 43 | 0 | 14 | 42 | 0 | 1 | 1 | 0 | 8 | 36 | 13 | 1 | 71 | 0 | 108 | 0 | 11 | 3 |
15 | pleasant | 0 | 0 | 136 | 0 | 0 | 0 | 3 | 7 | 2 | 32 | 0 | 0 | 38 | 77 | 0 | 0 | 327 | 0 | 0 | 11 | 0 | 9 | 0 | 0 | 0 | 267 | 4 | 0 | 3 | 0 | 36 | 0 | 0 | 49 | 0 | 0 | 1 | 0 | 0 | 21 | 2 | 1 | 348 | 1 | 78 | 0 | 45 | 2 |
16 | self-confident | 0 | 0 | 302 | 0 | 0 | 1 | 2 | 10 | 0 | 16 | 0 | 0 | 27 | 164 | 1 | 0 | 261 | 0 | 3 | 22 | 0 | 8 | 1 | 0 | 0 | 222 | 3 | 7 | 0 | 0 | 15 | 0 | 1 | 54 | 0 | 0 | 0 | 0 | 3 | 15 | 4 | 0 | 224 | 1 | 99 | 0 | 33 | 1 |
17 | sensitive | 0 | 0 | 576 | 1 | 0 | 0 | 1 | 4 | 0 | 9 | 0 | 0 | 9 | 174 | 0 | 0 | 149 | 0 | 3 | 3 | 0 | 11 | 0 | 0 | 0 | 253 | 2 | 27 | 1 | 0 | 32 | 0 | 11 | 55 | 0 | 0 | 0 | 0 | 2 | 21 | 6 | 1 | 56 | 1 | 75 | 0 | 14 | 3 |
18 | stubborn | 0 | 0 | 1006 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0 | 0 | 71 | 0 | 6 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0 | 27 | 2 | 0 | 24 | 2 | 77 | 29 | 0 | 0 | 0 | 0 | 3 | 13 | 50 | 2 | 0 | 0 | 30 | 0 | 0 | 3 |
19 | supportive | 0 | 0 | 109 | 0 | 0 | 2 | 11 | 16 | 1 | 46 | 3 | 0 | 27 | 97 | 2 | 0 | 335 | 1 | 0 | 14 | 0 | 2 | 0 | 0 | 0 | 244 | 9 | 2 | 3 | 0 | 32 | 0 | 0 | 78 | 0 | 0 | 0 | 0 | 2 | 12 | 1 | 0 | 259 | 0 | 126 | 0 | 63 | 3 |
20 | unreasonable | 0 | 0 | 1008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 | 78 | 0 | 0 | 104 | 0 | 7 | 1 | 0 | 0 | 0 | 0 | 1 | 98 | 0 | 10 | 5 | 0 | 6 | 1 | 38 | 24 | 0 | 3 | 0 | 0 | 7 | 15 | 23 | 0 | 15 | 0 | 51 | 0 | 0 | 1 |
# Only show clusters with more than 150 assigned examples
s_cols = [True] + list(cl_adj_48_df.sum(axis=0)[1:] > 150)
cl_adj_48_df[1:].loc[:, s_cols].style.background_gradient(
axis=None,
vmin=0,
vmax=1000,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 2 | 7 | 9 | 12 | 13 | 16 | 19 | 21 | 25 | 27 | 30 | 32 | 33 | 39 | 40 | 42 | 44 | 46
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
1 | assertive | 674 | 5 | 6 | 7 | 137 | 158 | 6 | 4 | 244 | 17 | 19 | 12 | 43 | 13 | 11 | 70 | 42 | 11 |
2 | committed | 475 | 6 | 16 | 19 | 200 | 214 | 19 | 2 | 136 | 4 | 38 | 0 | 41 | 15 | 6 | 153 | 92 | 34 |
3 | compassionate | 140 | 16 | 34 | 23 | 183 | 310 | 9 | 4 | 194 | 8 | 71 | 0 | 64 | 23 | 1 | 222 | 129 | 37 |
4 | confident | 182 | 17 | 30 | 26 | 248 | 296 | 33 | 15 | 118 | 1 | 28 | 1 | 36 | 18 | 1 | 277 | 100 | 22 |
5 | considerate | 296 | 9 | 25 | 29 | 123 | 308 | 19 | 10 | 191 | 7 | 36 | 1 | 55 | 17 | 4 | 221 | 82 | 43 |
6 | decisive | 748 | 4 | 2 | 16 | 113 | 188 | 13 | 14 | 162 | 9 | 29 | 6 | 26 | 15 | 3 | 74 | 52 | 4 |
7 | determined | 667 | 13 | 5 | 12 | 192 | 157 | 7 | 20 | 167 | 21 | 20 | 18 | 45 | 12 | 13 | 32 | 40 | 4 |
8 | emotional | 582 | 5 | 16 | 27 | 63 | 232 | 1 | 0 | 220 | 4 | 13 | 8 | 50 | 43 | 7 | 122 | 63 | 20 |
9 | gentle | 441 | 4 | 13 | 15 | 135 | 212 | 8 | 8 | 257 | 6 | 26 | 0 | 38 | 35 | 5 | 182 | 78 | 18 |
10 | honest | 508 | 13 | 11 | 38 | 121 | 190 | 8 | 13 | 186 | 1 | 71 | 0 | 33 | 33 | 4 | 169 | 57 | 12 |
11 | intellectual | 381 | 0 | 2 | 17 | 114 | 158 | 4 | 1 | 21 | 3 | 183 | 0 | 109 | 10 | 1 | 79 | 389 | 3 |
12 | modest | 620 | 3 | 1 | 19 | 66 | 205 | 10 | 10 | 197 | 3 | 51 | 0 | 41 | 36 | 8 | 165 | 43 | 1 |
13 | no_adjective | 373 | 4 | 19 | 27 | 85 | 262 | 13 | 11 | 242 | 1 | 38 | 0 | 63 | 22 | 5 | 172 | 76 | 26 |
14 | outspoken | 704 | 4 | 5 | 12 | 79 | 194 | 0 | 2 | 106 | 3 | 43 | 14 | 42 | 36 | 13 | 71 | 108 | 11 |
15 | pleasant | 136 | 7 | 32 | 38 | 77 | 327 | 11 | 9 | 267 | 0 | 36 | 0 | 49 | 21 | 2 | 348 | 78 | 45 |
16 | self-confident | 302 | 10 | 16 | 27 | 164 | 261 | 22 | 8 | 222 | 7 | 15 | 1 | 54 | 15 | 4 | 224 | 99 | 33 |
17 | sensitive | 576 | 4 | 9 | 9 | 174 | 149 | 3 | 11 | 253 | 27 | 32 | 11 | 55 | 21 | 6 | 56 | 75 | 14 |
18 | stubborn | 1006 | 0 | 0 | 0 | 64 | 71 | 0 | 0 | 91 | 27 | 24 | 77 | 29 | 13 | 50 | 0 | 30 | 0 |
19 | supportive | 109 | 16 | 46 | 27 | 97 | 335 | 14 | 2 | 244 | 2 | 32 | 0 | 78 | 12 | 1 | 259 | 126 | 63 |
20 | unreasonable | 1008 | 0 | 1 | 3 | 78 | 104 | 1 | 0 | 98 | 10 | 6 | 38 | 24 | 15 | 23 | 15 | 51 | 0 |
# Composition of the clusters with more than 150 assigned examples
meaningful_clusters = [i for i, b in enumerate(list(cl_adj_48_df.sum(axis=0)[1:] > 150)) if b]
pre_pandas_clusters = []
for cl_id in meaningful_clusters:
    dct = clusters_48[cl_id]
    line_dct = {
        "cluster_id": cl_id,
        # "total": len(dct["ids"]),
    }
    gdr_dct = dict(dct["labels_gender"])
    for k in ["woman", "man", "person"]:
        line_dct[k] = gdr_dct.get(k, 0)
    eth_dct = dict(dct["labels_ethnicity"])
    for k in ethnicities_ordered:
        line_dct[k] = eth_dct.get(k, 0)
    mod_dct = dict(dct["labels_model"])
    # for k in ["DallE", "SD_2", "SD_14"]:
    #     line_dct[k] = mod_dct.get(k, 0)
    pre_pandas_clusters += [line_dct]
print("#### Visualizing cluster compositions")
clusters_df = pd.DataFrame.from_dict(
pre_pandas_clusters,
)
clusters_df.style.background_gradient(axis=None, vmin=0, vmax=50, cmap="YlGnBu")
#### Visualizing cluster compositions
 | cluster_id | woman | man | person | (unspecified) | Caucasian | White | Multiracial | Black | African-American | Asian | South Asian | Southeast Asian | East Asian | Pacific Islander | Latino | Latinx | Hispanic | First Nations | Native American | American Indian | Indigenous American
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 2 | 0 | 29 | 28 | 20 | 20 | 17 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1 | 7 | 44 | 0 | 3 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 9 | 1 | 0 | 9 | 8 | 7 | 9 | 1 | 0 | 0 | 0 |
2 | 9 | 34 | 0 | 12 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 12 | 20 | 6 | 0 | 0 | 0 | 0 |
3 | 12 | 0 | 13 | 25 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 | 0 | 12 | 7 | 7 | 8 | 0 | 1 | 0 | 0 |
4 | 13 | 0 | 19 | 18 | 7 | 16 | 13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5 | 16 | 0 | 24 | 7 | 8 | 5 | 2 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 | 3 | 3 | 0 | 0 | 0 | 0 |
6 | 19 | 0 | 17 | 12 | 0 | 0 | 1 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 9 | 12 | 2 | 1 | 0 | 0 | 0 |
7 | 21 | 0 | 22 | 7 | 0 | 0 | 0 | 5 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 6 | 1 | 6 | 0 | 0 | 0 | 0 |
8 | 25 | 26 | 0 | 2 | 10 | 6 | 6 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 |
9 | 27 | 23 | 0 | 2 | 5 | 5 | 8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | 1 | 0 | 0 | 0 | 0 | 0 |
10 | 30 | 0 | 19 | 3 | 9 | 3 | 10 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
11 | 32 | 21 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 1 | 2 | 2 | 0 | 7 | 3 | 0 | 2 |
12 | 33 | 18 | 0 | 4 | 1 | 4 | 7 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 4 | 0 | 1 | 0 | 0 | 0 |
13 | 39 | 0 | 14 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | 0 | 0 | 4 | 2 | 2 | 5 |
14 | 40 | 0 | 10 | 9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 2 | 1 | 3 | 0 | 0 | 4 | 3 | 1 | 2 |
15 | 42 | 0 | 5 | 13 | 1 | 10 | 7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
16 | 44 | 0 | 5 | 12 | 9 | 2 | 4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 |
17 | 46 | 15 | 0 | 1 | 3 | 7 | 6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
adjectives_to_cluster_12 = {}
for dct in prof_list_with_clusters:
    if dct["model"] == "SD_14":
        adj = dct["prompt_adjective"]
        cl_id = dct["cluster_id_12"]
        adjectives_to_cluster_12[adj] = adjectives_to_cluster_12.get(
            adj,
            dict([("adjective", adj)] + [(i, 0) for i in range(12)])
        )
        adjectives_to_cluster_12[adj][cl_id] += 1
pre_pandas_cl_adj_12 = sorted(adjectives_to_cluster_12.values(), key=lambda x:x["adjective"])
cl_adj_12_df = pd.DataFrame.from_dict(pre_pandas_cl_adj_12)
cl_adj_12_df.style.background_gradient(
axis=None,
vmin=0,
vmax=500,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11
---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | (blank) | 700 | 41 | 54 | 445 | 177 | 5 | 0 | 10 | 17 | 31 | 15 | 5
1 | ambitious | 540 | 126 | 101 | 369 | 240 | 0 | 0 | 15 | 28 | 20 | 49 | 12 |
2 | assertive | 675 | 34 | 90 | 494 | 141 | 3 | 0 | 7 | 14 | 26 | 6 | 10 |
3 | committed | 652 | 31 | 64 | 523 | 152 | 10 | 0 | 10 | 15 | 31 | 7 | 5 |
4 | compassionate | 336 | 30 | 140 | 734 | 158 | 20 | 1 | 26 | 26 | 14 | 7 | 8 |
5 | confident | 1152 | 78 | 170 | 926 | 518 | 2 | 0 | 26 | 22 | 50 | 44 | 12 |
6 | considerate | 522 | 45 | 67 | 505 | 208 | 11 | 0 | 17 | 48 | 23 | 50 | 4 |
7 | decisive | 802 | 20 | 39 | 452 | 107 | 4 | 0 | 3 | 16 | 40 | 15 | 2 |
8 | determined | 663 | 67 | 89 | 438 | 39 | 23 | 0 | 34 | 50 | 70 | 11 | 16 |
9 | emotional | 557 | 26 | 80 | 541 | 26 | 42 | 0 | 22 | 140 | 58 | 3 | 5 |
10 | gentle | 669 | 28 | 60 | 494 | 144 | 31 | 0 | 7 | 29 | 14 | 19 | 5 |
11 | honest | 706 | 40 | 56 | 418 | 139 | 40 | 0 | 10 | 16 | 49 | 10 | 16 |
12 | intellectual | 863 | 74 | 58 | 350 | 98 | 5 | 0 | 5 | 3 | 16 | 13 | 15 |
13 | modest | 566 | 71 | 60 | 358 | 287 | 11 | 0 | 51 | 18 | 33 | 14 | 31 |
14 | outspoken | 646 | 121 | 117 | 420 | 80 | 20 | 0 | 28 | 16 | 35 | 8 | 9 |
15 | pleasant | 661 | 20 | 56 | 459 | 235 | 4 | 0 | 5 | 20 | 9 | 29 | 2 |
16 | sensitive | 628 | 13 | 42 | 568 | 38 | 19 | 0 | 19 | 110 | 44 | 13 | 6 |
17 | stubborn | 842 | 47 | 21 | 381 | 24 | 24 | 0 | 20 | 47 | 69 | 16 | 9 |
18 | supportive | 497 | 35 | 155 | 646 | 121 | 0 | 0 | 9 | 25 | 2 | 8 | 2 |
19 | unreasonable | 985 | 11 | 14 | 335 | 69 | 1 | 0 | 3 | 18 | 38 | 21 | 5 |
# Only show clusters with more than 150 assigned examples
s_cols = [True] + list(cl_adj_12_df.sum(axis=0)[1:] > 150)
cl_adj_12_df[1:].loc[:, s_cols].style.background_gradient(
axis=None,
vmin=0,
vmax=1000,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 1 | 2 | 3 | 4 | 5 | 7 | 8 | 9 | 10 | 11
---|---|---|---|---|---|---|---|---|---|---|---|---
1 | ambitious | 540 | 126 | 101 | 369 | 240 | 0 | 15 | 28 | 20 | 49 | 12 |
2 | assertive | 675 | 34 | 90 | 494 | 141 | 3 | 7 | 14 | 26 | 6 | 10 |
3 | committed | 652 | 31 | 64 | 523 | 152 | 10 | 10 | 15 | 31 | 7 | 5 |
4 | compassionate | 336 | 30 | 140 | 734 | 158 | 20 | 26 | 26 | 14 | 7 | 8 |
5 | confident | 1152 | 78 | 170 | 926 | 518 | 2 | 26 | 22 | 50 | 44 | 12 |
6 | considerate | 522 | 45 | 67 | 505 | 208 | 11 | 17 | 48 | 23 | 50 | 4 |
7 | decisive | 802 | 20 | 39 | 452 | 107 | 4 | 3 | 16 | 40 | 15 | 2 |
8 | determined | 663 | 67 | 89 | 438 | 39 | 23 | 34 | 50 | 70 | 11 | 16 |
9 | emotional | 557 | 26 | 80 | 541 | 26 | 42 | 22 | 140 | 58 | 3 | 5 |
10 | gentle | 669 | 28 | 60 | 494 | 144 | 31 | 7 | 29 | 14 | 19 | 5 |
11 | honest | 706 | 40 | 56 | 418 | 139 | 40 | 10 | 16 | 49 | 10 | 16 |
12 | intellectual | 863 | 74 | 58 | 350 | 98 | 5 | 5 | 3 | 16 | 13 | 15 |
13 | modest | 566 | 71 | 60 | 358 | 287 | 11 | 51 | 18 | 33 | 14 | 31 |
14 | outspoken | 646 | 121 | 117 | 420 | 80 | 20 | 28 | 16 | 35 | 8 | 9 |
15 | pleasant | 661 | 20 | 56 | 459 | 235 | 4 | 5 | 20 | 9 | 29 | 2 |
16 | sensitive | 628 | 13 | 42 | 568 | 38 | 19 | 19 | 110 | 44 | 13 | 6 |
17 | stubborn | 842 | 47 | 21 | 381 | 24 | 24 | 20 | 47 | 69 | 16 | 9 |
18 | supportive | 497 | 35 | 155 | 646 | 121 | 0 | 9 | 25 | 2 | 8 | 2 |
19 | unreasonable | 985 | 11 | 14 | 335 | 69 | 1 | 3 | 18 | 38 | 21 | 5 |
# Composition of the clusters with more than 150 assigned examples
meaningful_clusters = [i for i, b in enumerate(list(cl_adj_12_df.sum(axis=0)[1:] > 150)) if b]
pre_pandas_clusters = []
for cl_id in meaningful_clusters:
    dct = clusters_12[cl_id]
    line_dct = {
        "cluster_id": cl_id,
        # "total": len(dct["ids"]),
    }
    gdr_dct = dict(dct["labels_gender"])
    for k in ["woman", "man", "person"]:
        line_dct[k] = gdr_dct.get(k, 0)
    eth_dct = dict(dct["labels_ethnicity"])
    for k in ethnicities_ordered:
        line_dct[k] = eth_dct.get(k, 0)
    mod_dct = dict(dct["labels_model"])
    # for k in ["DallE", "SD_2", "SD_14"]:
    #     line_dct[k] = mod_dct.get(k, 0)
    pre_pandas_clusters += [line_dct]
print("#### Visualizing cluster compositions")
clusters_df = pd.DataFrame.from_dict(
pre_pandas_clusters,
)
clusters_df.style.background_gradient(axis=None, vmin=0, vmax=50, cmap="YlGnBu")
#### Visualizing cluster compositions
 | cluster_id | woman | man | person | (unspecified) | Caucasian | White | Multiracial | Black | African-American | Asian | South Asian | Southeast Asian | East Asian | Pacific Islander | Latino | Latinx | Hispanic | First Nations | Native American | American Indian | Indigenous American
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 0 | 0 | 97 | 76 | 52 | 48 | 52 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 | 6 | 5 | 0 | 0 | 0 | 0 |
1 | 1 | 1 | 84 | 66 | 1 | 1 | 3 | 35 | 52 | 52 | 0 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 0 | 0 | 0 | 0 |
2 | 2 | 109 | 0 | 32 | 8 | 5 | 6 | 36 | 28 | 32 | 0 | 0 | 0 | 0 | 6 | 6 | 9 | 5 | 0 | 0 | 0 | 0 |
3 | 3 | 111 | 0 | 20 | 24 | 24 | 27 | 0 | 0 | 0 | 0 | 2 | 2 | 1 | 3 | 15 | 27 | 5 | 1 | 0 | 0 | 0 |
4 | 4 | 0 | 75 | 54 | 3 | 9 | 2 | 6 | 0 | 0 | 0 | 0 | 2 | 0 | 23 | 25 | 15 | 23 | 9 | 6 | 3 | 3 |
5 | 5 | 56 | 35 | 35 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 23 | 24 | 40 |
6 | 7 | 109 | 0 | 6 | 0 | 0 | 0 | 5 | 8 | 6 | 0 | 30 | 1 | 0 | 9 | 9 | 9 | 10 | 2 | 8 | 10 | 8 |
7 | 8 | 89 | 0 | 23 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 40 | 21 | 4 | 1 | 0 | 7 | 2 | 0 | 6 |
8 | 9 | 0 | 52 | 54 | 0 | 1 | 0 | 5 | 1 | 0 | 0 | 0 | 16 | 3 | 11 | 10 | 6 | 4 | 10 | 11 | 13 | 15 |
9 | 10 | 0 | 57 | 43 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 46 | 13 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
10 | 11 | 0 | 52 | 43 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 58 | 0 | 0 | 2 | 13 | 14 | 6 | 0 | 0 | 0 | 0 |
# Example use: look at cluster assignment per adjective for SD_14
adjectives_to_cluster_24 = {}
for dct in prof_list_with_clusters:
    if dct["model"] == "SD_14":
        adj = dct["prompt_adjective"]
        cl_id = dct["cluster_id_24"]
        adjectives_to_cluster_24[adj] = adjectives_to_cluster_24.get(
            adj,
            dict([("adjective", adj)] + [(i, 0) for i in range(24)])
        )
        adjectives_to_cluster_24[adj][cl_id] += 1
pre_pandas_cl_adj_24 = sorted(adjectives_to_cluster_24.values(), key=lambda x:x["adjective"])
cl_adj_24_df = pd.DataFrame.from_dict(pre_pandas_cl_adj_24)
cl_adj_24_df.style.background_gradient(
axis=None,
vmin=0,
vmax=500,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | (blank) | 10 | 525 | 1 | 30 | 0 | 0 | 138 | 12 | 17 | 73 | 303 | 11 | 13 | 21 | 2 | 23 | 9 | 3 | 21 | 16 | 48 | 15 | 0 | 209
1 | ambitious | 13 | 419 | 9 | 78 | 0 | 0 | 115 | 5 | 62 | 52 | 246 | 12 | 5 | 55 | 3 | 30 | 22 | 50 | 26 | 2 | 28 | 40 | 0 | 228 |
2 | assertive | 6 | 461 | 6 | 54 | 0 | 0 | 181 | 7 | 17 | 117 | 302 | 11 | 5 | 18 | 4 | 54 | 5 | 6 | 10 | 6 | 40 | 11 | 0 | 179 |
3 | committed | 10 | 530 | 2 | 33 | 0 | 1 | 130 | 14 | 13 | 45 | 377 | 14 | 1 | 24 | 2 | 41 | 9 | 5 | 14 | 16 | 31 | 11 | 0 | 177 |
4 | compassionate | 18 | 285 | 3 | 69 | 0 | 5 | 134 | 32 | 22 | 10 | 579 | 5 | 6 | 12 | 1 | 95 | 4 | 2 | 6 | 23 | 31 | 5 | 1 | 152 |
5 | confident | 22 | 872 | 10 | 91 | 0 | 0 | 242 | 2 | 45 | 77 | 653 | 18 | 16 | 78 | 2 | 107 | 23 | 33 | 25 | 12 | 62 | 19 | 0 | 591 |
6 | considerate | 17 | 402 | 2 | 47 | 0 | 1 | 157 | 17 | 22 | 37 | 345 | 16 | 20 | 42 | 3 | 23 | 37 | 34 | 16 | 16 | 30 | 11 | 0 | 205 |
7 | decisive | 4 | 565 | 2 | 16 | 0 | 0 | 183 | 8 | 4 | 172 | 258 | 13 | 8 | 15 | 6 | 39 | 6 | 10 | 9 | 14 | 33 | 11 | 0 | 124 |
8 | determined | 31 | 427 | 9 | 62 | 0 | 3 | 304 | 39 | 9 | 191 | 132 | 22 | 5 | 12 | 13 | 39 | 9 | 9 | 28 | 31 | 71 | 36 | 0 | 18 |
9 | emotional | 15 | 265 | 2 | 38 | 0 | 7 | 418 | 112 | 1 | 235 | 159 | 9 | 1 | 1 | 8 | 49 | 12 | 0 | 8 | 49 | 69 | 20 | 0 | 22 |
10 | gentle | 8 | 522 | 4 | 35 | 0 | 0 | 169 | 19 | 9 | 40 | 315 | 9 | 9 | 20 | 1 | 32 | 19 | 3 | 6 | 63 | 33 | 16 | 0 | 168 |
11 | honest | 8 | 516 | 9 | 20 | 0 | 10 | 149 | 12 | 16 | 99 | 264 | 12 | 7 | 23 | 5 | 39 | 9 | 3 | 16 | 64 | 66 | 14 | 0 | 139 |
12 | intellectual | 5 | 652 | 11 | 41 | 0 | 0 | 158 | 0 | 22 | 129 | 188 | 22 | 2 | 23 | 1 | 21 | 5 | 16 | 26 | 11 | 38 | 29 | 0 | 100 |
13 | modest | 45 | 471 | 19 | 49 | 0 | 1 | 118 | 14 | 28 | 28 | 233 | 10 | 12 | 40 | 2 | 21 | 6 | 3 | 33 | 31 | 58 | 17 | 0 | 261 |
14 | outspoken | 17 | 482 | 5 | 98 | 0 | 4 | 242 | 19 | 24 | 86 | 176 | 7 | 3 | 13 | 3 | 31 | 4 | 6 | 34 | 37 | 80 | 62 | 0 | 67 |
15 | pleasant | 4 | 520 | 0 | 28 | 0 | 0 | 105 | 5 | 19 | 26 | 350 | 2 | 14 | 16 | 0 | 34 | 14 | 14 | 7 | 11 | 32 | 3 | 0 | 296 |
16 | sensitive | 12 | 382 | 4 | 23 | 0 | 0 | 426 | 74 | 2 | 187 | 158 | 20 | 4 | 11 | 2 | 31 | 25 | 12 | 1 | 36 | 48 | 16 | 0 | 26 |
17 | stubborn | 17 | 419 | 5 | 15 | 0 | 1 | 354 | 33 | 2 | 377 | 43 | 25 | 5 | 3 | 5 | 11 | 5 | 10 | 18 | 42 | 67 | 34 | 0 | 9 |
18 | supportive | 3 | 393 | 1 | 88 | 0 | 0 | 141 | 3 | 20 | 12 | 493 | 3 | 6 | 13 | 1 | 83 | 23 | 5 | 17 | 2 | 5 | 1 | 0 | 187 |
19 | unreasonable | 2 | 621 | 4 | 7 | 0 | 0 | 201 | 8 | 11 | 271 | 130 | 8 | 7 | 6 | 4 | 16 | 9 | 19 | 3 | 11 | 55 | 4 | 0 | 103 |
# Only show clusters with more than 150 assigned examples
s_cols = [True] + list(cl_adj_24_df.sum(axis=0)[1:] > 150)
cl_adj_24_df[1:].loc[:, s_cols].style.background_gradient(
axis=None,
vmin=0,
vmax=1000,
cmap="YlGnBu"
).format(precision=0)
 | adjective | 0 | 1 | 3 | 6 | 7 | 8 | 9 | 10 | 11 | 13 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 23
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
1 | ambitious | 13 | 419 | 78 | 115 | 5 | 62 | 52 | 246 | 12 | 55 | 30 | 22 | 50 | 26 | 2 | 28 | 40 | 228 |
2 | assertive | 6 | 461 | 54 | 181 | 7 | 17 | 117 | 302 | 11 | 18 | 54 | 5 | 6 | 10 | 6 | 40 | 11 | 179 |
3 | committed | 10 | 530 | 33 | 130 | 14 | 13 | 45 | 377 | 14 | 24 | 41 | 9 | 5 | 14 | 16 | 31 | 11 | 177 |
4 | compassionate | 18 | 285 | 69 | 134 | 32 | 22 | 10 | 579 | 5 | 12 | 95 | 4 | 2 | 6 | 23 | 31 | 5 | 152 |
5 | confident | 22 | 872 | 91 | 242 | 2 | 45 | 77 | 653 | 18 | 78 | 107 | 23 | 33 | 25 | 12 | 62 | 19 | 591 |
6 | considerate | 17 | 402 | 47 | 157 | 17 | 22 | 37 | 345 | 16 | 42 | 23 | 37 | 34 | 16 | 16 | 30 | 11 | 205 |
7 | decisive | 4 | 565 | 16 | 183 | 8 | 4 | 172 | 258 | 13 | 15 | 39 | 6 | 10 | 9 | 14 | 33 | 11 | 124 |
8 | determined | 31 | 427 | 62 | 304 | 39 | 9 | 191 | 132 | 22 | 12 | 39 | 9 | 9 | 28 | 31 | 71 | 36 | 18 |
9 | emotional | 15 | 265 | 38 | 418 | 112 | 1 | 235 | 159 | 9 | 1 | 49 | 12 | 0 | 8 | 49 | 69 | 20 | 22 |
10 | gentle | 8 | 522 | 35 | 169 | 19 | 9 | 40 | 315 | 9 | 20 | 32 | 19 | 3 | 6 | 63 | 33 | 16 | 168 |
11 | honest | 8 | 516 | 20 | 149 | 12 | 16 | 99 | 264 | 12 | 23 | 39 | 9 | 3 | 16 | 64 | 66 | 14 | 139 |
12 | intellectual | 5 | 652 | 41 | 158 | 0 | 22 | 129 | 188 | 22 | 23 | 21 | 5 | 16 | 26 | 11 | 38 | 29 | 100 |
13 | modest | 45 | 471 | 49 | 118 | 14 | 28 | 28 | 233 | 10 | 40 | 21 | 6 | 3 | 33 | 31 | 58 | 17 | 261 |
14 | outspoken | 17 | 482 | 98 | 242 | 19 | 24 | 86 | 176 | 7 | 13 | 31 | 4 | 6 | 34 | 37 | 80 | 62 | 67 |
15 | pleasant | 4 | 520 | 28 | 105 | 5 | 19 | 26 | 350 | 2 | 16 | 34 | 14 | 14 | 7 | 11 | 32 | 3 | 296 |
16 | sensitive | 12 | 382 | 23 | 426 | 74 | 2 | 187 | 158 | 20 | 11 | 31 | 25 | 12 | 1 | 36 | 48 | 16 | 26 |
17 | stubborn | 17 | 419 | 15 | 354 | 33 | 2 | 377 | 43 | 25 | 3 | 11 | 5 | 10 | 18 | 42 | 67 | 34 | 9 |
18 | supportive | 3 | 393 | 88 | 141 | 3 | 20 | 12 | 493 | 3 | 13 | 83 | 23 | 5 | 17 | 2 | 5 | 1 | 187 |
19 | unreasonable | 2 | 621 | 7 | 201 | 8 | 11 | 271 | 130 | 8 | 6 | 16 | 9 | 19 | 3 | 11 | 55 | 4 | 103 |
# Composition of the clusters with more than 150 assigned examples
meaningful_clusters = [i for i, b in enumerate(list(cl_adj_24_df.sum(axis=0)[1:] > 150)) if b]
pre_pandas_clusters = []
for cl_id in meaningful_clusters:
    dct = clusters_24[cl_id]
    line_dct = {
        "cluster_id": cl_id,
        # "total": len(dct["ids"]),
    }
    gdr_dct = dict(dct["labels_gender"])
    for k in ["woman", "man", "person"]:
        line_dct[k] = gdr_dct.get(k, 0)
    eth_dct = dict(dct["labels_ethnicity"])
    for k in ethnicities_ordered:
        line_dct[k] = eth_dct.get(k, 0)
    mod_dct = dict(dct["labels_model"])
    # for k in ["DallE", "SD_2", "SD_14"]:
    #     line_dct[k] = mod_dct.get(k, 0)
    pre_pandas_clusters += [line_dct]
print("#### Visualizing cluster compositions")
clusters_df = pd.DataFrame.from_dict(
pre_pandas_clusters,
)
clusters_df.style.background_gradient(axis=None, vmin=0, vmax=50, cmap="YlGnBu")
#### Visualizing cluster compositions
 | cluster_id | woman | man | person | (unspecified) | Caucasian | White | Multiracial | Black | African-American | Asian | South Asian | Southeast Asian | East Asian | Pacific Islander | Latino | Latinx | Hispanic | First Nations | Native American | American Indian | Indigenous American
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 0 | 109 | 0 | 6 | 0 | 0 | 0 | 5 | 8 | 6 | 0 | 30 | 1 | 0 | 9 | 9 | 9 | 10 | 2 | 8 | 10 | 8 |
1 | 1 | 0 | 68 | 45 | 32 | 26 | 34 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 | 6 | 5 | 0 | 0 | 0 | 0 |
2 | 3 | 70 | 0 | 24 | 4 | 0 | 4 | 22 | 28 | 32 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 2 | 0 | 0 | 0 | 0 |
3 | 6 | 64 | 0 | 8 | 15 | 16 | 21 | 0 | 0 | 0 | 0 | 2 | 2 | 1 | 2 | 4 | 8 | 0 | 1 | 0 | 0 | 0 |
4 | 7 | 54 | 0 | 12 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0 | 20 | 4 | 1 | 0 | 7 | 2 | 0 | 6 |
5 | 8 | 1 | 29 | 34 | 1 | 1 | 3 | 23 | 20 | 11 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 2 | 0 | 0 | 0 | 0 |
6 | 9 | 0 | 29 | 31 | 20 | 22 | 18 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7 | 10 | 47 | 0 | 12 | 9 | 8 | 6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 11 | 19 | 5 | 0 | 0 | 0 | 0 |
8 | 11 | 0 | 26 | 32 | 0 | 1 | 0 | 5 | 1 | 0 | 0 | 0 | 16 | 2 | 7 | 10 | 6 | 4 | 1 | 2 | 2 | 1 |
9 | 13 | 0 | 27 | 26 | 0 | 0 | 0 | 4 | 0 | 0 | 0 | 0 | 2 | 0 | 16 | 11 | 9 | 11 | 0 | 0 | 0 | 0 |
10 | 15 | 39 | 0 | 8 | 4 | 5 | 2 | 14 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 6 | 9 | 3 | 0 | 0 | 0 | 0 |
11 | 16 | 35 | 0 | 11 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 40 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
12 | 17 | 0 | 27 | 19 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
13 | 18 | 0 | 26 | 20 | 0 | 0 | 0 | 6 | 20 | 20 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
14 | 19 | 0 | 29 | 16 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 5 | 7 | 17 |
15 | 20 | 0 | 32 | 11 | 3 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 11 | 1 | 0 | 8 | 6 | 3 | 3 |
16 | 21 | 0 | 29 | 12 | 0 | 0 | 0 | 6 | 12 | 21 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
17 | 23 | 0 | 16 | 17 | 0 | 9 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 5 | 12 | 1 | 0 | 0 | 0 |
adjectives_to_cluster_48 = {}
for dct in prof_list_with_clusters:
    if dct["model"] == "SD_14":
        adj = dct["prompt_adjective"]
        cl_id = dct["cluster_id_48"]
        adjectives_to_cluster_48[adj] = adjectives_to_cluster_48.get(
            adj,
            dict([("adjective", adj)] + [(i, 0) for i in range(48)])
        )
        adjectives_to_cluster_48[adj][cl_id] += 1
pre_pandas_cl_adj_48 = sorted(adjectives_to_cluster_48.values(), key=lambda x:x["adjective"])
cl_adj_48_df = pd.DataFrame.from_dict(pre_pandas_cl_adj_48)
cl_adj_48_df.style.background_gradient(
axis=None,
vmin=0,
vmax=1000,
cmap="YlGnBu"
).format(precision=0)
adjective | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | (none) | 0 | 0 | 47 | 3 | 14 | 15 | 21 | 8 | 3 | 101 | 22 | 1 | 14 | 42 | 3 | 6 | 291 | 12 | 2 | 6 | 0 | 7 | 8 | 0 | 1 | 29 | 41 | 36 | 10 | 0 | 57 | 13 | 9 | 72 | 0 | 0 | 3 | 0 | 0 | 27 | 2 | 0 | 275 | 2 | 92 | 8 | 191 | 6
1 | ambitious | 0 | 0 | 30 | 4 | 21 | 55 | 26 | 7 | 43 | 127 | 53 | 0 | 40 | 58 | 9 | 2 | 228 | 10 | 0 | 10 | 0 | 17 | 19 | 0 | 2 | 32 | 45 | 14 | 27 | 0 | 19 | 2 | 3 | 64 | 0 | 0 | 13 | 0 | 4 | 14 | 4 | 13 | 240 | 3 | 104 | 17 | 116 | 5 |
2 | assertive | 0 | 0 | 82 | 1 | 31 | 9 | 42 | 4 | 5 | 101 | 31 | 0 | 12 | 76 | 3 | 0 | 209 | 4 | 0 | 11 | 0 | 18 | 6 | 1 | 1 | 48 | 24 | 51 | 17 | 0 | 52 | 4 | 6 | 74 | 0 | 0 | 3 | 0 | 2 | 15 | 3 | 4 | 249 | 2 | 78 | 3 | 204 | 14 |
3 | committed | 0 | 0 | 29 | 0 | 13 | 8 | 33 | 5 | 4 | 121 | 24 | 0 | 9 | 53 | 2 | 8 | 236 | 7 | 1 | 4 | 0 | 9 | 6 | 1 | 0 | 21 | 31 | 23 | 11 | 0 | 96 | 9 | 3 | 86 | 3 | 0 | 2 | 0 | 3 | 25 | 1 | 3 | 215 | 0 | 116 | 13 | 252 | 14 |
4 | compassionate | 0 | 0 | 3 | 3 | 25 | 11 | 83 | 10 | 1 | 169 | 54 | 3 | 12 | 20 | 2 | 17 | 170 | 2 | 1 | 7 | 0 | 3 | 3 | 8 | 1 | 23 | 61 | 45 | 18 | 0 | 68 | 18 | 5 | 71 | 0 | 0 | 6 | 1 | 1 | 31 | 2 | 7 | 76 | 2 | 51 | 9 | 395 | 2 |
5 | confident | 0 | 1 | 49 | 10 | 64 | 27 | 92 | 16 | 20 | 312 | 36 | 0 | 52 | 89 | 5 | 0 | 457 | 12 | 0 | 18 | 0 | 22 | 6 | 0 | 3 | 61 | 122 | 35 | 21 | 0 | 78 | 7 | 4 | 130 | 0 | 0 | 13 | 0 | 3 | 37 | 2 | 9 | 601 | 8 | 194 | 21 | 348 | 15 |
6 | considerate | 0 | 0 | 28 | 11 | 18 | 16 | 21 | 10 | 26 | 131 | 32 | 2 | 33 | 31 | 13 | 11 | 254 | 12 | 1 | 10 | 0 | 9 | 6 | 1 | 0 | 45 | 46 | 37 | 16 | 0 | 50 | 11 | 1 | 72 | 0 | 1 | 3 | 0 | 1 | 19 | 6 | 6 | 195 | 3 | 76 | 30 | 204 | 2 |
7 | decisive | 0 | 0 | 114 | 0 | 7 | 4 | 38 | 4 | 7 | 93 | 8 | 0 | 10 | 69 | 7 | 4 | 273 | 4 | 2 | 10 | 0 | 17 | 9 | 0 | 2 | 36 | 18 | 53 | 6 | 0 | 53 | 8 | 4 | 73 | 0 | 0 | 0 | 0 | 1 | 24 | 8 | 2 | 193 | 2 | 152 | 2 | 174 | 9 |
8 | determined | 0 | 3 | 139 | 1 | 13 | 4 | 35 | 10 | 6 | 59 | 47 | 2 | 8 | 83 | 11 | 9 | 237 | 25 | 7 | 9 | 0 | 22 | 21 | 7 | 1 | 45 | 2 | 148 | 11 | 0 | 53 | 29 | 23 | 111 | 0 | 0 | 1 | 0 | 6 | 25 | 19 | 28 | 47 | 3 | 110 | 1 | 71 | 8 |
9 | emotional | 0 | 0 | 141 | 0 | 27 | 4 | 42 | 8 | 0 | 65 | 15 | 0 | 1 | 147 | 9 | 1 | 107 | 9 | 3 | 5 | 0 | 3 | 17 | 25 | 0 | 46 | 7 | 276 | 5 | 0 | 72 | 30 | 131 | 64 | 0 | 0 | 0 | 1 | 6 | 24 | 23 | 7 | 26 | 1 | 50 | 6 | 91 | 5 |
10 | gentle | 0 | 2 | 30 | 5 | 16 | 13 | 31 | 3 | 4 | 60 | 20 | 5 | 14 | 38 | 5 | 12 | 216 | 6 | 0 | 4 | 0 | 3 | 9 | 3 | 1 | 35 | 21 | 60 | 3 | 0 | 169 | 32 | 0 | 69 | 1 | 0 | 1 | 0 | 0 | 23 | 4 | 7 | 228 | 1 | 76 | 21 | 244 | 5 |
11 | honest | 0 | 5 | 62 | 2 | 5 | 7 | 36 | 6 | 3 | 104 | 14 | 2 | 15 | 76 | 4 | 7 | 216 | 7 | 4 | 15 | 0 | 15 | 9 | 13 | 1 | 38 | 27 | 41 | 12 | 0 | 111 | 49 | 4 | 54 | 0 | 1 | 5 | 0 | 1 | 34 | 5 | 5 | 185 | 2 | 110 | 4 | 172 | 12 |
12 | intellectual | 0 | 1 | 84 | 0 | 12 | 20 | 19 | 3 | 9 | 67 | 30 | 0 | 10 | 48 | 1 | 1 | 150 | 13 | 1 | 8 | 0 | 11 | 20 | 0 | 0 | 27 | 17 | 32 | 12 | 0 | 204 | 8 | 0 | 103 | 0 | 0 | 8 | 0 | 12 | 25 | 3 | 3 | 158 | 6 | 248 | 4 | 115 | 7 |
13 | modest | 0 | 7 | 20 | 1 | 16 | 32 | 18 | 9 | 1 | 98 | 38 | 0 | 19 | 56 | 4 | 12 | 224 | 11 | 3 | 25 | 0 | 8 | 9 | 1 | 3 | 17 | 75 | 43 | 7 | 0 | 77 | 26 | 4 | 56 | 7 | 0 | 13 | 0 | 2 | 51 | 1 | 11 | 233 | 1 | 106 | 1 | 146 | 8 |
14 | outspoken | 0 | 1 | 65 | 1 | 27 | 33 | 28 | 4 | 5 | 66 | 65 | 1 | 7 | 60 | 1 | 6 | 158 | 23 | 3 | 10 | 0 | 12 | 39 | 8 | 4 | 32 | 16 | 99 | 15 | 0 | 133 | 31 | 8 | 100 | 0 | 0 | 5 | 0 | 5 | 54 | 2 | 23 | 111 | 1 | 108 | 1 | 121 | 8 |
15 | pleasant | 0 | 0 | 19 | 3 | 14 | 8 | 33 | 3 | 14 | 107 | 13 | 1 | 9 | 38 | 4 | 3 | 259 | 5 | 0 | 3 | 0 | 6 | 0 | 0 | 0 | 24 | 50 | 28 | 16 | 0 | 124 | 6 | 1 | 35 | 0 | 0 | 0 | 0 | 0 | 27 | 0 | 4 | 305 | 0 | 77 | 17 | 241 | 3 |
16 | sensitive | 0 | 0 | 135 | 1 | 13 | 2 | 25 | 10 | 7 | 66 | 13 | 0 | 3 | 88 | 22 | 6 | 194 | 5 | 2 | 9 | 0 | 12 | 9 | 4 | 0 | 55 | 5 | 288 | 5 | 0 | 68 | 31 | 51 | 82 | 0 | 0 | 0 | 0 | 4 | 18 | 14 | 4 | 62 | 2 | 76 | 6 | 97 | 6 |
17 | stubborn | 0 | 2 | 266 | 3 | 8 | 1 | 7 | 8 | 10 | 17 | 6 | 1 | 0 | 125 | 7 | 3 | 213 | 24 | 2 | 5 | 0 | 11 | 25 | 8 | 1 | 57 | 2 | 216 | 3 | 0 | 108 | 35 | 28 | 61 | 0 | 1 | 0 | 0 | 10 | 30 | 15 | 14 | 34 | 2 | 89 | 1 | 31 | 10 |
18 | supportive | 0 | 0 | 8 | 3 | 51 | 12 | 72 | 6 | 2 | 179 | 43 | 0 | 11 | 13 | 4 | 2 | 229 | 8 | 0 | 3 | 0 | 1 | 1 | 0 | 1 | 31 | 34 | 17 | 12 | 0 | 38 | 0 | 0 | 87 | 0 | 0 | 5 | 0 | 0 | 9 | 0 | 3 | 194 | 1 | 90 | 26 | 304 | 0 |
19 | unreasonable | 0 | 0 | 195 | 3 | 1 | 2 | 16 | 3 | 17 | 39 | 5 | 0 | 4 | 156 | 6 | 1 | 203 | 3 | 1 | 6 | 0 | 8 | 1 | 0 | 0 | 41 | 14 | 91 | 13 | 0 | 113 | 6 | 6 | 63 | 0 | 0 | 0 | 0 | 2 | 20 | 8 | 3 | 171 | 3 | 177 | 4 | 89 | 6 |
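In raw counts, high-volume adjectives like "confident" saturate the color scale. To compare distribution shapes across adjectives instead, one option (my normalization, not in the original notebook) is to rescale each row to proportions before styling:
# Sketch: normalize each adjective row so rows are comparable regardless of image volume
count_cols = [c for c in cl_adj_48_df.columns if c != "adjective"]
cl_adj_48_props = cl_adj_48_df.copy()
cl_adj_48_props[count_cols] = cl_adj_48_props[count_cols].div(
    cl_adj_48_props[count_cols].sum(axis=1), axis=0
)
cl_adj_48_props.style.background_gradient(
    axis=None, vmin=0, vmax=0.25, cmap="YlGnBu"
).format(precision=2)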
# Only show clusters that have more than 150 assigned examples
s_cols = [True] + list(cl_adj_48_df.sum(axis=0)[1:] > 150)
# [1:] on the frame drops the no-adjective row; s_cols keeps "adjective" plus the large clusters
cl_adj_48_df[1:].loc[:, s_cols].style.background_gradient(
    axis=None,
    vmin=0,
    vmax=1000,
    cmap="YlGnBu"
).format(precision=0)
  | adjective | 2 | 4 | 5 | 6 | 8 | 9 | 10 | 12 | 13 | 16 | 17 | 19 | 21 | 22 | 25 | 26 | 27 | 28 | 30 | 31 | 32 | 33 | 39 | 41 | 42 | 44 | 45 | 46
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
1 | ambitious | 30 | 21 | 55 | 26 | 43 | 127 | 53 | 40 | 58 | 228 | 10 | 10 | 17 | 19 | 32 | 45 | 14 | 27 | 19 | 2 | 3 | 64 | 14 | 13 | 240 | 104 | 17 | 116 |
2 | assertive | 82 | 31 | 9 | 42 | 5 | 101 | 31 | 12 | 76 | 209 | 4 | 11 | 18 | 6 | 48 | 24 | 51 | 17 | 52 | 4 | 6 | 74 | 15 | 4 | 249 | 78 | 3 | 204 |
3 | committed | 29 | 13 | 8 | 33 | 4 | 121 | 24 | 9 | 53 | 236 | 7 | 4 | 9 | 6 | 21 | 31 | 23 | 11 | 96 | 9 | 3 | 86 | 25 | 3 | 215 | 116 | 13 | 252 |
4 | compassionate | 3 | 25 | 11 | 83 | 1 | 169 | 54 | 12 | 20 | 170 | 2 | 7 | 3 | 3 | 23 | 61 | 45 | 18 | 68 | 18 | 5 | 71 | 31 | 7 | 76 | 51 | 9 | 395 |
5 | confident | 49 | 64 | 27 | 92 | 20 | 312 | 36 | 52 | 89 | 457 | 12 | 18 | 22 | 6 | 61 | 122 | 35 | 21 | 78 | 7 | 4 | 130 | 37 | 9 | 601 | 194 | 21 | 348 |
6 | considerate | 28 | 18 | 16 | 21 | 26 | 131 | 32 | 33 | 31 | 254 | 12 | 10 | 9 | 6 | 45 | 46 | 37 | 16 | 50 | 11 | 1 | 72 | 19 | 6 | 195 | 76 | 30 | 204 |
7 | decisive | 114 | 7 | 4 | 38 | 7 | 93 | 8 | 10 | 69 | 273 | 4 | 10 | 17 | 9 | 36 | 18 | 53 | 6 | 53 | 8 | 4 | 73 | 24 | 2 | 193 | 152 | 2 | 174 |
8 | determined | 139 | 13 | 4 | 35 | 6 | 59 | 47 | 8 | 83 | 237 | 25 | 9 | 22 | 21 | 45 | 2 | 148 | 11 | 53 | 29 | 23 | 111 | 25 | 28 | 47 | 110 | 1 | 71 |
9 | emotional | 141 | 27 | 4 | 42 | 0 | 65 | 15 | 1 | 147 | 107 | 9 | 5 | 3 | 17 | 46 | 7 | 276 | 5 | 72 | 30 | 131 | 64 | 24 | 7 | 26 | 50 | 6 | 91 |
10 | gentle | 30 | 16 | 13 | 31 | 4 | 60 | 20 | 14 | 38 | 216 | 6 | 4 | 3 | 9 | 35 | 21 | 60 | 3 | 169 | 32 | 0 | 69 | 23 | 7 | 228 | 76 | 21 | 244 |
11 | honest | 62 | 5 | 7 | 36 | 3 | 104 | 14 | 15 | 76 | 216 | 7 | 15 | 15 | 9 | 38 | 27 | 41 | 12 | 111 | 49 | 4 | 54 | 34 | 5 | 185 | 110 | 4 | 172 |
12 | intellectual | 84 | 12 | 20 | 19 | 9 | 67 | 30 | 10 | 48 | 150 | 13 | 8 | 11 | 20 | 27 | 17 | 32 | 12 | 204 | 8 | 0 | 103 | 25 | 3 | 158 | 248 | 4 | 115 |
13 | modest | 20 | 16 | 32 | 18 | 1 | 98 | 38 | 19 | 56 | 224 | 11 | 25 | 8 | 9 | 17 | 75 | 43 | 7 | 77 | 26 | 4 | 56 | 51 | 11 | 233 | 106 | 1 | 146 |
14 | outspoken | 65 | 27 | 33 | 28 | 5 | 66 | 65 | 7 | 60 | 158 | 23 | 10 | 12 | 39 | 32 | 16 | 99 | 15 | 133 | 31 | 8 | 100 | 54 | 23 | 111 | 108 | 1 | 121 |
15 | pleasant | 19 | 14 | 8 | 33 | 14 | 107 | 13 | 9 | 38 | 259 | 5 | 3 | 6 | 0 | 24 | 50 | 28 | 16 | 124 | 6 | 1 | 35 | 27 | 4 | 305 | 77 | 17 | 241 |
16 | sensitive | 135 | 13 | 2 | 25 | 7 | 66 | 13 | 3 | 88 | 194 | 5 | 9 | 12 | 9 | 55 | 5 | 288 | 5 | 68 | 31 | 51 | 82 | 18 | 4 | 62 | 76 | 6 | 97 |
17 | stubborn | 266 | 8 | 1 | 7 | 10 | 17 | 6 | 0 | 125 | 213 | 24 | 5 | 11 | 25 | 57 | 2 | 216 | 3 | 108 | 35 | 28 | 61 | 30 | 14 | 34 | 89 | 1 | 31 |
18 | supportive | 8 | 51 | 12 | 72 | 2 | 179 | 43 | 11 | 13 | 229 | 8 | 3 | 1 | 1 | 31 | 34 | 17 | 12 | 38 | 0 | 0 | 87 | 9 | 3 | 194 | 90 | 26 | 304 |
19 | unreasonable | 195 | 1 | 2 | 16 | 17 | 39 | 5 | 4 | 156 | 203 | 3 | 6 | 8 | 1 | 41 | 14 | 91 | 13 | 113 | 6 | 6 | 63 | 20 | 3 | 171 | 177 | 4 | 89 |
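The positional slice `sum(axis=0)[1:]` above works because "adjective" is the first column, but it silently depends on column order (pandas reduces the string column by concatenation, or warns, depending on the version). An equivalent, more explicit variant, offered as a sketch:
# Sketch: compute cluster totals without relying on column position
cluster_totals = cl_adj_48_df.drop(columns=["adjective"]).sum(axis=0)
s_cols_alt = [True] + list(cluster_totals > 150)
# the column labels are the cluster ids 0..47, so they can be used directly
meaningful_clusters_alt = [cl for cl, total in cluster_totals.items() if total > 150]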
# Composition of the clusters with more than 150 assigned examples
meaningful_clusters = [i for i, b in enumerate(list(cl_adj_48_df.sum(axis=0)[1:] > 150)) if b]
pre_pandas_clusters = []
for cl_id in meaningful_clusters:
    dct = clusters_48[cl_id]
    line_dct = {
        "cluster_id": cl_id,
        # "total": len(dct["ids"]),
    }
    gdr_dct = dict(dct["labels_gender"])
    for k in ["woman", "man", "person"]:
        line_dct[k] = gdr_dct.get(k, 0)
    eth_dct = dict(dct["labels_ethnicity"])
    for k in ethnicities_ordered:
        line_dct[k] = eth_dct.get(k, 0)
    mod_dct = dict(dct["labels_model"])
    # for k in ["DallE", "SD_2", "SD_14"]:
    #     line_dct[k] = mod_dct.get(k, 0)
    pre_pandas_clusters += [line_dct]
print("#### Visualizing cluster compositions")
clusters_df = pd.DataFrame.from_dict(
    pre_pandas_clusters,
)
clusters_df.style.background_gradient(axis=None, vmin=0, vmax=50, cmap="YlGnBu")
#### Visualizing cluster compositions
  | cluster_id | woman | man | person | (unmarked) | Caucasian | White | Multiracial | Black | African-American | Asian | South Asian | Southeast Asian | East Asian | Pacific Islander | Latino | Latinx | Hispanic | First Nations | Native American | American Indian | Indigenous American
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | 2 | 0 | 29 | 28 | 20 | 20 | 17 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1 | 4 | 35 | 0 | 17 | 2 | 1 | 3 | 14 | 16 | 12 | 0 | 0 | 0 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 0 | 0 |
2 | 5 | 0 | 30 | 18 | 0 | 1 | 0 | 6 | 15 | 26 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3 | 6 | 40 | 0 | 7 | 5 | 5 | 2 | 14 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 6 | 8 | 3 | 0 | 0 | 0 | 0 |
4 | 8 | 0 | 27 | 19 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
5 | 9 | 34 | 0 | 12 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 12 | 20 | 6 | 0 | 0 | 0 | 0 |
6 | 10 | 33 | 0 | 8 | 2 | 0 | 1 | 9 | 11 | 17 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
7 | 12 | 0 | 13 | 25 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 | 0 | 12 | 7 | 7 | 8 | 0 | 1 | 0 | 0 |
8 | 13 | 0 | 19 | 18 | 7 | 16 | 13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9 | 16 | 0 | 24 | 7 | 8 | 5 | 2 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 | 3 | 3 | 0 | 0 | 0 | 0 |
10 | 17 | 0 | 18 | 13 | 0 | 0 | 0 | 1 | 17 | 13 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
11 | 19 | 0 | 17 | 12 | 0 | 0 | 1 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 9 | 12 | 2 | 1 | 0 | 0 | 0 |
12 | 21 | 0 | 22 | 7 | 0 | 0 | 0 | 5 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 6 | 1 | 6 | 0 | 0 | 0 | 0 |
13 | 22 | 0 | 20 | 8 | 0 | 0 | 0 | 4 | 12 | 11 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
14 | 25 | 26 | 0 | 2 | 10 | 6 | 6 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 |
15 | 26 | 0 | 19 | 8 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 5 | 11 | 0 | 0 | 0 | 0 |
16 | 27 | 23 | 0 | 2 | 5 | 5 | 8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | 1 | 0 | 0 | 0 | 0 | 0 |
17 | 28 | 1 | 3 | 20 | 1 | 1 | 3 | 9 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 2 | 3 | 0 | 0 | 0 | 0 |
18 | 30 | 0 | 19 | 3 | 9 | 3 | 10 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
19 | 31 | 0 | 16 | 6 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 2 | 2 | 9 |
20 | 32 | 21 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 1 | 2 | 2 | 0 | 7 | 3 | 0 | 2 |
21 | 33 | 18 | 0 | 4 | 1 | 4 | 7 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 4 | 0 | 1 | 0 | 0 | 0 |
22 | 39 | 0 | 14 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | 0 | 0 | 4 | 2 | 2 | 5 |
23 | 41 | 19 | 0 | 0 | 0 | 0 | 0 | 1 | 9 | 9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
24 | 42 | 0 | 5 | 13 | 1 | 10 | 7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
25 | 44 | 0 | 5 | 12 | 9 | 2 | 4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 |
26 | 45 | 12 | 0 | 4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 12 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
27 | 46 | 15 | 0 | 1 | 3 | 7 | 6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
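As a closing sanity check (again a sketch, not in the original), one can count how many of these meaningful clusters draw their images from at most one of the gendered prompts, which is the pattern the table suggests:
# Sketch: count single-gender clusters in the meaningful-cluster table above
exclusive = (clusters_df[["woman", "man"]].gt(0).sum(axis=1) <= 1).sum()
print(f"{exclusive} of {len(clusters_df)} meaningful clusters contain images from at most one of the 'woman'/'man' prompts")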