"""Streamlit demo: compare GPT-2 regard (bias) between male and female actor prompts.

Flow: password-gate the app, sample prompts from the BOLD dataset, generate
continuations with GPT-2 for each gender group, then compare them with the
`Regard` metric.
"""
import os
from random import sample

import pandas as pd
import streamlit as st
from datasets import load_dataset

from utils.metric import Regard
from utils.model import gpt2

# Set up the Streamlit interface
st.title('Gender Bias Analysis in Text Generation')


def check_password():
    """Render the password form; mark the session verified on a correct entry."""

    def password_entered():
        # Compare the entered value against the PASSWORD environment variable.
        if password_input == os.getenv('PASSWORD'):
            st.session_state['password_correct'] = True
        else:
            st.error("Incorrect Password, please try again.")

    password_input = st.text_input("Enter Password:", type="password")
    submit_button = st.button("Submit", on_click=password_entered)

    if submit_button and not st.session_state.get('password_correct', False):
        st.error("Please enter a valid password to access the demo.")


if not st.session_state.get('password_correct', False):
    check_password()
else:
    st.sidebar.success("Password Verified. Proceed with the demo.")

    st.subheader('Loading and Processing Data')
    st.write('Loading the BOLD dataset...')
    bold = load_dataset("AlexaAI/bold", split="train")

    # Allow user to set the sample size
    data_size = st.sidebar.slider('Select number of samples per category:',
                                  min_value=1, max_value=50, value=10)

    st.write(f'Sampling {data_size} female and male American actors...')
    female_bold = sample(
        [p for p in bold if p['category'] == 'American_actresses'], data_size)
    male_bold = sample(
        [p for p in bold if p['category'] == 'American_actors'], data_size)

    # BOLD entries carry several prompts; use the first for each person.
    male_prompts = [p['prompts'][0] for p in male_bold]
    female_prompts = [p['prompts'][0] for p in female_bold]

    GPT2 = gpt2()

    st.write('Generating text for male prompts...')
    male_generation = GPT2.text_generation(
        male_prompts, pad_token_id=50256, max_length=50,
        do_sample=False, truncation=True)
    # Strip the prompt from each generated text to keep only the continuation.
    male_continuations = [
        gen[0]['generated_text'].replace(prompt, '')
        for gen, prompt in zip(male_generation, male_prompts)]

    st.write('Generating text for female prompts...')
    female_generation = GPT2.text_generation(
        female_prompts, pad_token_id=50256, max_length=50,
        do_sample=False, truncation=True)
    # BUG FIX: originally this zipped male_generation/male_prompts, so the
    # "female" continuations were copies of the male ones and the bias
    # comparison was meaningless. Use the female outputs here.
    female_continuations = [
        gen[0]['generated_text'].replace(prompt, '')
        for gen, prompt in zip(female_generation, female_prompts)]

    st.write('Generated {} male continuations'.format(len(male_continuations)))
    st.write('Generated {} female continuations'.format(len(female_continuations)))

    st.subheader('Sample Generated Texts')
    st.write('**Male Prompt:**', male_prompts[0])
    st.write('**Male Continuation:**', male_continuations[0])
    st.write('**Female Prompt:**', female_prompts[0])
    st.write('**Female Continuation:**', female_continuations[0])

    regard = Regard("compare")
    st.write('Computing regard results to compare male and female continuations...')
    regard_results = regard.compute(data=male_continuations,
                                    references=female_continuations)
    st.subheader('Regard Results')
    st.write('**Raw Regard Results:**')
    st.json(regard_results)

    st.write('Computing average regard results for comparative analysis...')
    regard_results_avg = regard.compute(data=male_continuations,
                                        references=female_continuations,
                                        aggregation='average')
    st.write('**Average Regard Results:**')
    st.json(regard_results_avg)