# Imports assumed by the cells below (originally defined in an earlier cell);
# _DATA_PATH is assumed to be defined elsewhere as the root of the generated-image datasets.
import csv
import io
import json
from glob import glob
from os.path import join as pjoin

import datasets
import numpy as np
import torch
from PIL import Image
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform
from sklearn.preprocessing import normalize
from tqdm import tqdm

# Identity prompts: each directory name is a prompt with four words before the
# ethnicity and two words after the gender word, so words[4:-3] is the ethnicity
# and words[-3] is the gender.
prompt_id_dir_list = glob(pjoin(_DATA_PATH, "SD_v1.4_random_seeds_identity", "*"))
id_img_list_sd_14 = []
for dir_path in prompt_id_dir_list:
    prompt_text = dir_path.split("/")[-1].replace("_", " ")
    eth = " ".join(prompt_text.split()[4:-3])
    gdr = prompt_text.split()[-3]
    images = glob(pjoin(dir_path, "*.jpg"))
    id_img_list_sd_14 += [
        {
            "text": prompt_text,
            "ethnicity": eth,
            "gender": gdr,
            "images": images,
            "model": "SD_14",
        }
    ]

prompt_id_dir_list = glob(pjoin(_DATA_PATH, "SD_v2_random_seeds_identity", "*"))
id_img_list_sd_2 = []
for dir_path in prompt_id_dir_list:
    prompt_text = dir_path.split("/")[-1].replace("_", " ")
    eth = " ".join(prompt_text.split()[4:-3])
    gdr = prompt_text.split()[-3]
    images = glob(pjoin(dir_path, "*.jpg"))
    id_img_list_sd_2 += [
        {
            "text": prompt_text,
            "ethnicity": eth,
            "gender": gdr,
            "images": images,
            "model": "SD_2",
        }
    ]

prompt_id_dir_list = glob(pjoin(_DATA_PATH, "dataset-identities-dalle2", "*"))
id_img_list_dalle = []
for dir_path in prompt_id_dir_list:
    prompt_text = dir_path.split("/")[-1].replace("_", " ")
    eth = " ".join(prompt_text.split()[4:-3])
    gdr = prompt_text.split()[-3]
    images = glob(pjoin(dir_path, "*.png"))
    id_img_list_dalle += [
        {
            "text": prompt_text,
            "ethnicity": eth,
            "gender": gdr,
            "images": images,
            "model": "DallE",
        }
    ]

id_img_list = id_img_list_sd_14 + id_img_list_sd_2 + id_img_list_dalle

# adding Asian for the VQA model outputs
ethnicities = sorted(set(dct["ethnicity"] for dct in id_img_list)) + ["Asian"]
len(id_img_list), ethnicities

# Profession prompts: adjectives and professions come from promptsadjectives.csv
adj_profs = list(csv.reader(open(pjoin(_DATA_PATH, "promptsadjectives.csv")), delimiter=","))
professions = [line[-1] for line in adj_profs[1:]]
adjectives = sorted(set([line[-2] for line in adj_profs[1:]] + [line[-3] for line in adj_profs[1:]]))
adjectives = [adj for adj in adjectives if adj != ""]

prompt_prof_dir_list = glob(pjoin(_DATA_PATH, "SD_v1.4_random_seeds", "*"))
prof_img_list_sd14 = []
for dir_path in tqdm(prompt_prof_dir_list):
    prompt_text = dir_path.split("/")[-1].replace("_", " ")
    adj = ""
    for a in adjectives:
        if a in prompt_text:
            adj = a
            break
    pro = ""
    for p in professions:
        if p in prompt_text:
            pro = p
            break
    images = glob(pjoin(dir_path, "*.jpg"))
    prof_img_list_sd14 += [
        {
            "text": prompt_text,
            "adjective": adj,
            "profession": pro,
            "images": images,
            "model": "SD_14",
        }
    ]

prompt_prof_dir_list = glob(pjoin(_DATA_PATH, "SD_v2_random_seeds", "*"))
prof_img_list_sd2 = []
for dir_path in tqdm(prompt_prof_dir_list):
    prompt_text = dir_path.split("/")[-1].replace("_", " ")
    adj = ""
    for a in adjectives:
        if a in prompt_text:
            adj = a
            break
    pro = ""
    for p in professions:
        if p in prompt_text:
            pro = p
            break
    images = glob(pjoin(dir_path, "*.jpg"))
    prof_img_list_sd2 += [
        {
            "text": prompt_text,
            "adjective": adj,
            "profession": pro,
            "images": images,
            "model": "SD_2",
        }
    ]

# different format for DallE: images are stored in parquet shards
dalle2_gens = datasets.load_dataset(
    "parquet", data_files=glob(pjoin(_DATA_PATH, "dataset-dalle/data", "*.parquet"))
)
prof_img_list_dalle = [
    {
        "text": dct["image_path"].split("/")[0].replace("_", " "),
        "adjective": dct["adjective"],
        "profession": dct["profession"],
        "image_path": dct["image_path"],
    }
    for dct in dalle2_gens["train"]
]
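# Illustrative check of the prompt parsing above, on a hypothetical directory name
# (the exact prompt template is an assumption; only the word positions matter here).
_example_prompt = "Photo_portrait_of_a_Native_American_woman_at_work".replace("_", " ")
print(" ".join(_example_prompt.split()[4:-3]))  # ethnicity words -> "Native American"
print(_example_prompt.split()[-3])              # gender word     -> "woman"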
# BLIP VQA model and constrained answer options
from bias_vqa_models import *

_VQA_MODEL_NAME = "Salesforce/blip-vqa-base"
blip_model = BlipBiasVQA(_VQA_MODEL_NAME, "cuda")

eth_options_id = blip_model.processor.tokenizer(
    [eth for eth in ethnicities if eth != ""], add_special_tokens=False
).input_ids
eth_options_check = dict(
    (blip_model.processor.decode(opt, skip_special_tokens=True), True) for opt in eth_options_id
)
gdr_options_id = blip_model.processor.tokenizer(
    ["man", "woman", "person"], add_special_tokens=False
).input_ids
gdr_options_check = dict(
    (blip_model.processor.decode(opt, skip_special_tokens=True), True) for opt in gdr_options_id
)
pro_options_id = blip_model.processor.tokenizer(professions, add_special_tokens=False).input_ids
pro_options_check = dict(
    (blip_model.processor.decode(opt, skip_special_tokens=True), True) for opt in pro_options_id
)
options_dict = {
    "ethnicity": (eth_options_id, eth_options_check),
    "gender": (gdr_options_id, gdr_options_check),
    "profession": (pro_options_id, pro_options_check),
    "appearance": None,
    "demeanor": None,
}

# Identity images: pixel embeddings and VQA question embeddings/answers
print("Computing pixel embeddings")
for dct in tqdm(id_img_list):
    dct["blip_pixel_embeddings"] = [
        blip_model.get_pixel_embeddings(Image.open(img_path).convert("RGB"))
        for img_path in dct["images"]
    ]

for w in ["gender", "ethnicity", "profession", "appearance"]:
    print(f"Computing question embeddings for {w}")
    for dct in tqdm(id_img_list):
        q_a_list = [
            blip_model.get_question_embeddings_answer(
                Image.open(img_path).convert("RGB"),
                question=f"what word best describes this person's {w}?",
                options=options_dict[w],
            )
            for img_path in dct["images"]
        ]
        dct[f"blip_question_{w}_embeddings"] = [q for q, a in q_a_list]
        dct[f"blip_question_{w}_answers"] = [a for q, a in q_a_list]

# note: takes about 20 minutes on one GPU on my laptop
torch.save(id_img_list, "identities_images_embeddings_CLIP_all.th")

ethnicities_ordered = [
    "",
    "Caucasian",
    "White",
    "Multiracial",
    "Black",
    "African-American",
    "Asian",
    "South Asian",
    "Southeast Asian",
    "East Asian",
    "Pacific Islander",
    "Latino",
    "Latinx",
    "Hispanic",
    "First Nations",
    "Native American",
    "American Indian",
    "Indigenous American",
]
ethnicities_predicted = sorted(
    set(ans for dct in id_img_list for ans in dct["blip_question_ethnicity_answers"])
)
ethnicities_predicted_ordered = [
    "caucasian",
    "white",
    "black",
    "asian",
    "latino",
    "native american",
]
assert len(ethnicities_predicted_ordered) == len(ethnicities_predicted)
assert len(ethnicities_ordered) == len(ethnicities)

# Profession images (SD 1.4 and SD 2): pixel embeddings and VQA question embeddings/answers
print("Computing pixel embeddings")
for dct in tqdm(prof_img_list_sd14 + prof_img_list_sd2):
    dct["blip_pixel_embeddings"] = [
        blip_model.get_pixel_embeddings(Image.open(img_path).convert("RGB"))
        for img_path in dct["images"]
    ]

for w in ["gender", "ethnicity", "appearance"]:
    print(f"Computing question embeddings for {w}")
    for dct in tqdm(prof_img_list_sd14 + prof_img_list_sd2):
        q_a_list = [
            blip_model.get_question_embeddings_answer(
                Image.open(img_path).convert("RGB"),
                question=f"what word best describes this person's {w}?",
                options=options_dict[w],
                num_beams=4,
            )
            for img_path in dct["images"]
        ]
        dct[f"blip_question_{w}_embeddings"] = [q for q, a in q_a_list]
        dct[f"blip_question_{w}_answers"] = [a for q, a in q_a_list]

# note: takes about 6h40 on one GPU
torch.save(prof_img_list_sd14, "professions_images_embeddings_CLIP_SD14.th")
torch.save(prof_img_list_sd2, "professions_images_embeddings_CLIP_SD2.th")

# Profession images (DallE 2): stored as raw bytes in the parquet dataset, one embedding per row
print("Computing pixel embeddings")
for i, dct in tqdm(enumerate(dalle2_gens["train"])):
    prof_img_list_dalle[i]["blip_pixel_embedding"] = blip_model.get_pixel_embeddings(
        Image.open(io.BytesIO(dct["image"]["bytes"])).convert("RGB")
    )

for w in ["gender", "ethnicity", "appearance"]:
    print(f"Computing question embeddings for {w}")
    for i, dct in tqdm(enumerate(dalle2_gens["train"])):
        q_rep, ans = blip_model.get_question_embeddings_answer(
            Image.open(io.BytesIO(dct["image"]["bytes"])).convert("RGB"),
            question=f"what word best describes this person's {w}?",
            options=options_dict[w],
            num_beams=4,
        )
        prof_img_list_dalle[i][f"blip_question_{w}_embedding"] = q_rep
        prof_img_list_dalle[i][f"blip_question_{w}_answer"] = ans

# note: takes about 3h on one GPU
torch.save(prof_img_list_dalle, "professions_images_embeddings_CLIP_dalle.th")
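# The bias_vqa_models module is not shown in this notebook. Below is a minimal,
# hypothetical sketch of the interface the calls above assume: get_pixel_embeddings
# returning an L2-normalized image vector, and get_question_embeddings_answer
# returning an (embedding, answer) pair. The class name and pooling choices here
# are assumptions; the real BlipBiasVQA also constrains generation to the tokenized
# `options`, which this sketch ignores (it free-generates), so it is illustrative only.
import torch
import torch.nn.functional as F
from transformers import BlipForQuestionAnswering, BlipProcessor


class BlipBiasVQASketch:
    def __init__(self, model_name, device):
        self.device = device
        self.processor = BlipProcessor.from_pretrained(model_name)
        self.model = BlipForQuestionAnswering.from_pretrained(model_name).to(device).eval()

    @torch.no_grad()
    def get_pixel_embeddings(self, image):
        # Mean-pool the vision encoder states and L2-normalize so dot products act as cosine similarities.
        pixel_values = self.processor(images=image, return_tensors="pt").pixel_values.to(self.device)
        image_embeds = self.model.vision_model(pixel_values=pixel_values).last_hidden_state
        return F.normalize(image_embeds.mean(dim=1), dim=-1)[0].cpu().numpy()

    @torch.no_grad()
    def get_question_embeddings_answer(self, image, question, options=None, num_beams=1):
        inputs = self.processor(images=image, text=question, return_tensors="pt").to(self.device)
        # Multimodal question representation: the text encoder cross-attends to the image features.
        image_embeds = self.model.vision_model(pixel_values=inputs.pixel_values).last_hidden_state
        question_states = self.model.text_encoder(
            input_ids=inputs.input_ids,
            attention_mask=inputs.attention_mask,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=torch.ones(image_embeds.shape[:-1], device=self.device, dtype=torch.long),
        ).last_hidden_state
        q_rep = F.normalize(question_states.mean(dim=1), dim=-1)[0].cpu().numpy()
        # Free-form answer generation (`options` is unused in this sketch).
        out = self.model.generate(
            input_ids=inputs.input_ids, pixel_values=inputs.pixel_values, num_beams=num_beams
        )
        return q_rep, self.processor.decode(out[0], skip_special_tokens=True)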
w = "appearance" _FIELD_NAME = f"blip_question_{w}_embeddings" # initial clustering with Scipy fcluster than refine with centroid merges _NUM_STARTING_CLUSTERS = 256 _NUM_CLUSTERS = 12 id_label_list = [ { "full": (dct["ethnicity"] + " " + dct["gender"] + " " + dct["model"]).strip(), "ethnicity": dct["ethnicity"], "gender": dct["gender"], "model":dct["model"], } for dct in id_img_list for j, _ in enumerate(dct["images"]) ] id_img_path_list = [img_path for dct in id_img_list for img_path in dct["images"]] id_rep_list = [rep for dct in id_img_list for rep in dct[_FIELD_NAME]] id_reps = np.array(id_rep_list) similarity_matrix = np.matmul(id_reps, id_reps.transpose()) dissimilarity = 1 - similarity_matrix np.fill_diagonal(dissimilarity, 0.) Z = linkage(squareform(dissimilarity), 'average') cluster_assignments = fcluster(Z, t=_NUM_STARTING_CLUSTERS, criterion="maxclust") clusters = {} for i, cl_id in enumerate(list(cluster_assignments)): clusters[cl_id] = clusters.get( cl_id, { "cl_id": cl_id, "ids": [], } ) clusters[cl_id]["ids"] += [i] for w in ["full", "gender", "ethnicity", "model"]: label_k = f"labels_{w}" label_v = id_label_list[i][w] clusters[cl_id][label_k] = clusters[cl_id].get(label_k, {}) clusters[cl_id][label_k][label_v] = clusters[cl_id][label_k].get(label_v, 0) clusters[cl_id][label_k][label_v] += 1 for cl_dct in clusters.values(): for w in ["full", "gender", "ethnicity", "model"]: assert(len(cl_dct["ids"]) == sum(cl_dct[f"labels_{w}"].values())) cl_dct["centroid"] = normalize(id_reps[np.ix_(cl_dct["ids"])].mean(axis=0, keepdims=True), axis=1, norm="l2")[0] while len(clusters) > _NUM_CLUSTERS: merge_from_id, min_cl = sorted(clusters.items(), key=lambda x:len(x[1]["ids"]))[0] # print("smallest cluster", len(min_cl["ids"])) merge_from_cl = clusters[merge_from_id] compare_scores = [ (cl_dct["cl_id"], np.dot(merge_from_cl["centroid"], cl_dct["centroid"])) for cl_dct in clusters.values() if cl_dct["cl_id"] != merge_from_id ] merge_to_id, merge_score = sorted(compare_scores, key=lambda x:x[1], reverse=True)[0] merge_to_cl = clusters[merge_to_id] merge_to_cl["ids"] += merge_from_cl["ids"] for w in ["full", "gender", "ethnicity", "model"]: for k, v in merge_from_cl[f"labels_{w}"].items(): merge_to_cl[f"labels_{w}"][k] = merge_to_cl[f"labels_{w}"].get(k, 0) merge_to_cl[f"labels_{w}"][k] += v assert(len(merge_to_cl["ids"]) == sum(merge_to_cl[f"labels_{w}"].values())) clusters[merge_to_id]["centroid"] = normalize(id_reps[np.ix_(merge_to_cl["ids"])].mean(axis=0, keepdims=True), axis=1, norm="l2")[0] # print(merge_to_cl["labels"], merge_score) del clusters[merge_from_id] for cl_dct in clusters.values(): for w in ["full", "gender", "ethnicity", "model"]: cl_dct[f"labels_{w}"] = sorted(cl_dct[f"labels_{w}"].items(), key=lambda x:x[1], reverse=True) clusters_all = [] for i, (cl_id, cl_dct) in enumerate(sorted(clusters.items(), key=lambda x:len(x[1]["ids"]), reverse=True)): examplar_scores = sorted([(eid, float(np.dot(id_reps[eid], cl_dct["centroid"]))) for eid in cl_dct["ids"]], key=lambda x:x[1], reverse=True) new_cl = { "id": i, "img_path_list": ["/".join(id_img_path_list[eid])[3:] for eid, sc in examplar_scores], "img_centroid_scores": [sc for eid, sc in examplar_scores], "centroid": [float(v) for v in cl_dct["centroid"]] } for w in ["full", "gender", "ethnicity", "model"]: new_cl[f"labels_{w}"] = cl_dct[f"labels_{w}"] clusters_all += [new_cl] json.dump(clusters_all, open(f"id_all_blip_clusters_{_NUM_CLUSTERS}.json", "w"), indent=2)torch.save( {12: clusters_12, 24: clusters_24, 48: 
clusters_48}, "clusters_12_24_48.th" )
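# When resuming from disk rather than from memory, the three clusterings used in
# the next cell can be reloaded from the file saved above:
clusters_dict = torch.load("clusters_12_24_48.th")
clusters_12, clusters_24, clusters_48 = clusters_dict[12], clusters_dict[24], clusters_dict[48]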
# Assign each image to its nearest appearance cluster (12, 24, and 48 granularities)
# and collect the prompt attributes and VQA predictions into flat JSON records.
id_list_with_clusters = []
for dct in id_img_list:
    for img_path, q_rep, pred_gdr, pred_eth, pred_pro, pred_app in zip(
        dct["images"],
        dct["blip_question_appearance_embeddings"],
        dct["blip_question_gender_answers"],
        dct["blip_question_ethnicity_answers"],
        dct["blip_question_profession_answers"],
        dct["blip_question_appearance_answers"],
    ):
        cluster_scores_12 = sorted(
            [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_12)],
            key=lambda x: x[1], reverse=True,
        )
        cluster_scores_24 = sorted(
            [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_24)],
            key=lambda x: x[1], reverse=True,
        )
        cluster_scores_48 = sorted(
            [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_48)],
            key=lambda x: x[1], reverse=True,
        )
        new_dct = {
            "model": dct["model"],
            "image_path": "/".join(img_path.split("/")[3:]),
            "prompt_text": dct["text"],
            "prompt_ethnicity": dct["ethnicity"],
            "prompt_gender": dct["gender"],
            "vqa_gender": pred_gdr,
            "vqa_ethnicity": pred_eth,
            "vqa_profession": pred_pro,
            "vqa_appearance": pred_app,
            "cluster_id_12": cluster_scores_12[0][0],
            "cluster_id_24": cluster_scores_24[0][0],
            "cluster_id_48": cluster_scores_48[0][0],
            "cluster_scores_12": cluster_scores_12,
            "cluster_scores_24": cluster_scores_24,
            "cluster_scores_48": cluster_scores_48,
        }
        id_list_with_clusters += [new_dct]

json.dump(id_list_with_clusters, open("identity_images_preds_clusters_all_BLIP.json", "w"), indent=2)
len(id_list_with_clusters)

prof_list_with_clusters = []
for dct in prof_img_list_sd14 + prof_img_list_sd2:
    for img_path, q_rep, pred_gdr, pred_eth, pred_app in zip(
        dct["images"],
        dct["blip_question_appearance_embeddings"],
        dct["blip_question_gender_answers"],
        dct["blip_question_ethnicity_answers"],
        dct["blip_question_appearance_answers"],
    ):
        cluster_scores_12 = sorted(
            [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_12)],
            key=lambda x: x[1], reverse=True,
        )
        cluster_scores_24 = sorted(
            [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_24)],
            key=lambda x: x[1], reverse=True,
        )
        cluster_scores_48 = sorted(
            [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_48)],
            key=lambda x: x[1], reverse=True,
        )
        new_dct = {
            "model": dct["model"],
            "image_path": "/".join(img_path.split("/")[3:]),
            "prompt_text": dct["text"],
            "prompt_adjective": dct["adjective"],
            "prompt_profession": dct["profession"],
            "vqa_gender": pred_gdr,
            "vqa_ethnicity": pred_eth,
            "vqa_appearance": pred_app,
            "cluster_id_12": cluster_scores_12[0][0],
            "cluster_id_24": cluster_scores_24[0][0],
            "cluster_id_48": cluster_scores_48[0][0],
            "cluster_scores_12": cluster_scores_12,
            "cluster_scores_24": cluster_scores_24,
            "cluster_scores_48": cluster_scores_48,
        }
        prof_list_with_clusters += [new_dct]

# DallE records store a single embedding and answer per image (no "images" list)
for dct in prof_img_list_dalle:
    q_rep = dct["blip_question_appearance_embedding"]
    cluster_scores_12 = sorted(
        [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_12)],
        key=lambda x: x[1], reverse=True,
    )
    cluster_scores_24 = sorted(
        [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_24)],
        key=lambda x: x[1], reverse=True,
    )
    cluster_scores_48 = sorted(
        [(i, float(np.dot(q_rep, cl["centroid"]))) for i, cl in enumerate(clusters_48)],
        key=lambda x: x[1], reverse=True,
    )
    new_dct = {
        "model": "DallE",
        "image_path": dct["image_path"],
        "prompt_text": dct["text"],
        "prompt_adjective": dct["adjective"],
        "prompt_profession": dct["profession"],
        "vqa_gender": dct["blip_question_gender_answer"],
        "vqa_ethnicity": dct["blip_question_ethnicity_answer"],
        "vqa_appearance": dct["blip_question_appearance_answer"],
        "cluster_id_12": cluster_scores_12[0][0],
        "cluster_id_24": cluster_scores_24[0][0],
        "cluster_id_48": cluster_scores_48[0][0],
        "cluster_scores_12": cluster_scores_12,
        "cluster_scores_24": cluster_scores_24,
        "cluster_scores_48": cluster_scores_48,
    }
    prof_list_with_clusters += [new_dct]

json.dump(prof_list_with_clusters, open("profession_images_preds_clusters_all_BLIP.json", "w"), indent=2)
len(prof_list_with_clusters)