# This is a project of Zentropi Inc. All rights reserved.

import os

import gradio as gr
import torch
import torch.nn.functional as F
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

device = 'cuda' if torch.cuda.is_available() else 'cpu'

base_model_name = "google/gemma-2-9b"
adapter_model_name = "samidh/cope.a.r09.2"

# Quantize the base model to 8-bit to reduce GPU memory use.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
)

model = AutoModelForCausalLM.from_pretrained(base_model_name,
                                             token=os.environ['HF_TOKEN'],
                                             quantization_config=bnb_config,
                                             device_map="auto")

# Apply the fine-tuned adapter and fold it into the base weights.
# merge_and_unload() returns the merged model, so its result must be captured.
model = PeftModel.from_pretrained(model, adapter_model_name, token=os.environ['HF_TOKEN'])
model = model.merge_and_unload()

# device_map="auto" has already placed the quantized weights, so no explicit
# .to(device) call is needed here.

tokenizer = AutoTokenizer.from_pretrained(base_model_name)
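
# The adapter is trained to answer with a single "0" or "1" token immediately
# after the "ANSWER" header in the prompt below. A small sketch (assumption:
# "0" and "1" each encode to exactly one token in the Gemma tokenizer): cache
# those token ids once so a calibrated score can be read from the logits later.
NEG_TOKEN_ID = tokenizer.encode("0", add_special_tokens=False)[0]
POS_TOKEN_ID = tokenizer.encode("1", add_special_tokens=False)[0]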
PROMPT = """
INSTRUCTIONS
============
Examine the given POLICY and determine if the given CONTENT meets the criteria for ANY of the LABELS. Answer "1" if yes, and "0" if no.

POLICY
======
{policy}

CONTENT
=======
{content}

ANSWER
======
"""

DEFAULT_POLICY = """# Policy

## Overview

This policy is designed to determine whether or not content is hate speech.

## Definition of Terms

- Protected Classes: Individuals or groups distinguished by race (e.g. Black, White), ethnicity (e.g. Hispanic), nationality (e.g. Canadian, Argentinian), caste (e.g. Brahmins, Kshatriyas, Vaishyas), immigration status (e.g. Immigrants), religion (e.g. Catholics, Jews), disability (e.g. blind people, deaf people), disease status (e.g. people with AIDS), sexual orientation (e.g. gays, bisexuals), transgender status (e.g. trans people, cis people), gender identity (e.g. women, men), gender expression (e.g. masculine, feminine), or sex (e.g. male, female).
- Derogatory Slur: Any term or phrase used pejoratively or contemptuously towards a member or members of a protected class, including those historically reclaimed but used in a derogatory context.
- Non-Protected Classes: Categories defined by political party membership (e.g. Democrats, Tories), political beliefs (e.g. fascists, communists), hobbies (e.g. gamers, knitters), criminal history (e.g. criminals, convicts), or profession (e.g. doctors, farmers).

## Interpretation of Language

- Terminology or phrasing that can be construed in various ways IS hate speech UNLESS context clearly indicates it unambiguously IS NOT hate speech. So, for example, attacks on a religion as an institution (e.g. "Hinduism is a garbage religion") SHOULD be understood as attacks on the followers of that religion.

## Definition of Labels

### (HS): Hate Speech

#### Includes

- Direct and Indirect Violence Advocacy: Any form of expression that celebrates, justifies, incites, or threatens physical or psychological harm against individuals or groups identified by their membership in a protected class.
- Dehumanization and Vilification: Content suggesting that individuals or groups, explicitly identified by their membership in a specific protected class, are subhuman, inherently lesser beings, or using animalistic descriptions in a way that promotes disdain or hate.
- Derogatory and Dehumanizing Language: Use of slurs, epithets, or any derogatory language aimed at belittling, humiliating, or inciting hatred against individuals or groups explicitly identified by their membership in a specific protected class.
- Explicit and Implicit Discrimination Advocacy: Promoting exclusion, segregation, or denial of rights against individuals or groups explicitly identified by their membership in a specific protected class.
- Collective Attribution of Negative Actions: Assigning collective blame or advocating collective punishment based on the actions or perceived characteristics of individuals or groups identified by their membership in a protected class.
- Inferiority and Superiority Claims: Statements that categorically assign inferiority or superiority, moral or intellectual, to individuals or groups identified by their membership in a protected class.
- Denial or Distortion of Historical Atrocities: Denying, grossly trivializing, or distorting documented atrocities against groups identified by their membership in a protected class, undermining their significance or the suffering of their members.
- Conspiracy Theories: Propagating unfounded allegations that individuals or groups, identified by their membership in a protected class, are responsible for serious harms or control significant institutions to the detriment of society.

#### Excludes

- Attacks on Non-Protected Classes: Content that attacks or criticizes individuals or groups identified by their membership in a Non-Protected Class, EVEN if that attack is violent, threatening, or otherwise hateful (e.g. "Criminals should all be rounded up and shot!").
- Criticism of Beliefs and Institutions: Constructive critique or discussion of political ideologies, religious doctrines, or institutions without resorting to hate speech or targeting individuals or groups identified by their membership in a protected class.
- Neutrally Reporting Historical Events: Neutrally and descriptively reporting on or discussing factual events in the past that could be construed as negative about individuals or groups identified by their membership in a protected class.
- Pushing Back on Hateful Language: Content where the writer pushes back on, condemns, questions, criticizes, or mocks a different person's hateful language or ideas.
- Artistic and Educational Content: Content with legitimate artistic, educational, or documentary value that discusses or portrays issues related to hate speech in a context clearly aimed at enlightening or informing without promoting hate.
"""

DEFAULT_CONTENT = "Put your content sample here."


# Classify a content sample against a policy: format the prompt, run a single
# forward pass, and read the verdict from the first token the model would emit
# after the "ANSWER" header.
def predict(content, policy):
    input_text = PROMPT.format(policy=policy, content=content)
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)

    with torch.inference_mode():
        outputs = model(input_ids)

    # Logits for the last position, i.e. the answer slot.
    logits = outputs.logits[:, -1, :]

    # Greedy decode: the single most likely next token is the verdict.
    predicted_token_id = torch.argmax(logits, dim=-1).item()
    decoded_output = tokenizer.decode([predicted_token_id])

    if decoded_output == '1':
        return 'MATCHES (i.e., violating)'
    else:
        return 'NO MATCHES (i.e., non-violating)'
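
# A hedged alternative readout (a sketch, not wired into the UI): rather than
# taking a greedy argmax over the whole vocabulary, compare the softmax mass on
# the "1" and "0" tokens cached above and renormalize over those two valid
# answers, yielding a violation probability that can be thresholded or logged.
def predict_proba(content, policy):
    """Return P(violating) from the probabilities of the two valid answers."""
    input_text = PROMPT.format(policy=policy, content=content)
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)

    with torch.inference_mode():
        logits = model(input_ids).logits[:, -1, :]

    # Softmax over the vocabulary gives a probability for each candidate token.
    probabilities = F.softmax(logits, dim=-1)
    p_pos = probabilities[0, POS_TOKEN_ID].item()
    p_neg = probabilities[0, NEG_TOKEN_ID].item()
    return p_pos / (p_pos + p_neg)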
# Create the interface: inputs on the left, result on the right.
with gr.Blocks() as demo:
    with gr.Row():
        # Left column with inputs
        with gr.Column(scale=1):
            input1 = gr.Textbox(label="Content", lines=2, value=DEFAULT_CONTENT)
            input2 = gr.Textbox(label="Policy", lines=8, value=DEFAULT_POLICY)

        # Right column with output
        with gr.Column(scale=1):
            output = gr.Textbox(label="Result")

    # Button below the inputs
    submit_btn = gr.Button("Submit")

    # Markdown content below the output
    gr.Markdown("""
# Usage Instructions

This interface checks a content sample against a policy and reports whether any policy label matches.

## Features

1. Two input text boxes: one for the content sample, one for the policy
2. One-click classification
3. Clear match / no-match output

## How to Use

1. Enter your content sample in the "Content" box
2. Enter or edit the policy in the "Policy" box
3. Click the "Submit" button to see the result

## Notes

- Both inputs are required for processing
- The output will be displayed in the right column
- Classification runs a 9B-parameter model, so results may take a few seconds

## Additional Information

This interface is built using Gradio, a Python library for creating easy-to-use web interfaces for machine learning models and data processing functions.
""")

    # Set up the processing function
    submit_btn.click(
        fn=predict,
        inputs=[input1, input2],
        outputs=output,
        api_name=False
    )

# Launch the interface
demo.launch(show_api=False)
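
# Illustrative check without the UI (demo.launch() above blocks while the
# server is running, so run these lines separately or before launching; the
# default content string is only a stand-in):
#   print(predict(DEFAULT_CONTENT, DEFAULT_POLICY))
#   print(f"P(violating) = {predict_proba(DEFAULT_CONTENT, DEFAULT_POLICY):.3f}")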