from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from sentence_transformers import SentenceTransformer
from datasets import load_dataset
import faiss
import numpy as np
import streamlit as st

# Load a public legal guidance dataset (LexGLUE, ECtHR Task A).
# Each example's 'text' field is a list of fact paragraphs, so join them
# into a single string per case before encoding.
dataset = load_dataset("lex_glue", "ecthr_a")
texts = [" ".join(paragraphs) for paragraphs in dataset["train"]["text"][:100]]  # limit to 100 samples for efficiency

# Initialize Sentence-BERT for document encoding and T5 for summarization
sbert_model = SentenceTransformer("all-mpnet-base-v2")
t5_tokenizer = AutoTokenizer.from_pretrained("t5-small")
t5_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Encode the legal guidance texts and build a FAISS index over them.
# convert_to_numpy=True yields a float32 NumPy array, which is what FAISS expects.
case_embeddings = sbert_model.encode(texts, convert_to_numpy=True, show_progress_bar=True)
index = faiss.IndexFlatL2(case_embeddings.shape[1])
index.add(case_embeddings)

# Retrieve the top_k cases most similar to the query
def retrieve_cases(query, top_k=3):
    # Encode as a one-element batch so the result is already 2D for FAISS
    query_embedding = sbert_model.encode([query], convert_to_numpy=True)
    _, indices = index.search(query_embedding, top_k)
    return [(texts[i], i) for i in indices[0]]

# Summarize a given text with T5 (the "summarize: " prefix is the task
# prompt T5 was trained with)
def summarize_text(text):
    inputs = t5_tokenizer("summarize: " + text, return_tensors="pt", max_length=512, truncation=True)
    outputs = t5_model.generate(
        inputs["input_ids"],
        max_length=150,
        min_length=40,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )
    return t5_tokenizer.decode(outputs[0], skip_special_tokens=True)

# Streamlit UI for the LawyerGuide app
def main():
    st.title("LawyerGuide App: Legal Guidance for False Accusations")
    query = st.text_input("Describe your situation or legal concern:")
    top_k = st.slider("Number of similar cases to retrieve:", 1, 5, 3)
    if st.button("Get Guidance"):
        if not query.strip():
            st.warning("Please describe your situation first.")
            return
        results = retrieve_cases(query, top_k=top_k)
        # 'case_idx' avoids shadowing the global FAISS 'index'
        for i, (case_text, case_idx) in enumerate(results):
            st.subheader(f"Guidance {i+1}")
            st.write("Relevant Text:", case_text)
            summary = summarize_text(case_text)
            st.write("Summary of Legal Guidance:", summary)

if __name__ == "__main__":
    main()
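
# A minimal way to try the app, assuming this file is saved as app.py (the
# filename is an assumption) and the dependencies (transformers,
# sentence-transformers, datasets, faiss-cpu, streamlit) are installed:
#
#   streamlit run app.py
#
# A hypothetical query to paste into the text box, for illustration only:
#   "I was falsely accused of breaching a contract I never signed."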