import pickle
import random
from collections import defaultdict

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gradio as gr
from datasets import load_dataset
# ---- Constants and Setup ----
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the SmolTalk dataset from Hugging Face. Its rows are chat
# transcripts stored under 'messages' (lists of role/content dicts),
# not a flat 'text' column, so flatten every message into the corpus.
# A 1% slice keeps memory manageable; widen it if resources allow.
dataset = load_dataset("HuggingFaceTB/smoltalk", "all", split="train[:1%]")
corpus = [msg['content'] for entry in dataset for msg in entry['messages']]
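# Optional sanity check (sketch; assumes the smoltalk schema described
# above). Uncomment to inspect a raw row and the flattened corpus size:
# print(dataset[0]["messages"][:2])   # e.g. [{"role": "user", "content": ...}, ...]
# print(f"Corpus size: {len(corpus)} messages")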
# ---- Advanced Text Generation Mechanisms ----

# N-gram model: map each (n-1)-word context to the words observed to
# follow it, then sample a continuation one word at a time.
def generate_ngram(corpus, n=3, length=50):
    ngrams = defaultdict(list)
    for sentence in corpus:
        words = sentence.split()
        for i in range(len(words) - n + 1):
            ngrams[tuple(words[i:i + n - 1])].append(words[i + n - 1])

    # Seed generation with the first n-1 words of a random sentence
    start = random.choice(corpus).split()[:n - 1]
    generated_text = ' '.join(start)
    for _ in range(length - (n - 1)):
        context = tuple(generated_text.split()[-(n - 1):])
        if context in ngrams:
            next_word = random.choice(ngrams[context])
            generated_text += ' ' + next_word
        else:
            break  # No continuation observed for this context
    return generated_text
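# Example usage (illustrative): with n=3 each word is conditioned on the
# previous two, so output reads locally fluent but drifts over longer
# spans; larger n tracks the corpus more closely but dead-ends sooner.
# print(generate_ngram(corpus, n=3, length=20))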
# Markov chain model: count word-bigram transitions, then sample each
# next word in proportion to how often it followed the current one.
def markov_chain(corpus, length=50):
    markov_model = defaultdict(lambda: defaultdict(int))
    for sentence in corpus:
        words = sentence.split()
        for i in range(len(words) - 1):
            markov_model[words[i]][words[i + 1]] += 1

    current = random.choice(corpus).split()[0]
    generated_text = current
    for _ in range(length - 1):
        successors = markov_model.get(current)
        if not successors:
            break  # Dead end: this word was never seen with a successor
        # Weighted sampling avoids the deterministic two-word loops that
        # always picking the single most frequent successor produces
        current = random.choices(list(successors), weights=successors.values())[0]
        generated_text += ' ' + current
    return generated_text
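# Example usage (illustrative): the chain conditions on one word only,
# so it is cheaper to build than the n-gram table but less coherent.
# print(markov_chain(corpus, length=20))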
# ---- Memory Management ----
def save_memory(memory, filename='chat_memory.pkl'):
    with open(filename, 'wb') as f:
        pickle.dump(memory, f)

def load_memory(filename='chat_memory.pkl'):
    try:
        with open(filename, 'rb') as f:
            return pickle.load(f)
    except (FileNotFoundError, EOFError):
        return []  # Start fresh if the file is missing or empty
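# Round-trip sketch (hypothetical filename): memory is a plain list of
# dicts, so pickle restores it exactly as saved.
# save_memory([{"input": "hello"}], filename='demo_memory.pkl')
# assert load_memory('demo_memory.pkl') == [{"input": "hello"}]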
# Restore any previous session from disk
session_memory = load_memory()
# ---- Neural Networks ----
# Note: these models are defined for later use; neither is currently
# wired into the response-generation path below.
class NNModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(NNModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return self.fc2(x)

class PHIModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(PHIModel, self).__init__()
        self.phi = (1 + np.sqrt(5)) / 2  # Golden ratio, ~1.618
        hidden = int(input_size * self.phi)
        self.fc1 = nn.Linear(input_size, hidden)
        self.fc2 = nn.Linear(hidden, output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return self.fc2(x)
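# Sizing sketch (hypothetical numbers): with input_size=64, the golden
# ratio gives a hidden layer of int(64 * 1.618...) = 103 units.
# model = PHIModel(input_size=64, output_size=10).to(device)
# logits = model(torch.randn(1, 64, device=device))  # -> shape (1, 10)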
# ---- Custom Chat Generation ----
def generate_response(user_input):
    # Note: user_input is not yet used; both generators sample from the
    # corpus rather than conditioning on the prompt
    ngram_response = generate_ngram(corpus, n=3, length=25)
    markov_response = markov_chain(corpus, length=25)
    # Combine both responses for diversity
    return f"NG Response: {ngram_response}\n\nMarkov Response: {markov_response}"
# ---- Interactive Chat Function ----
def advanced_agi_chat(user_input):
    # Record the turn and persist the full history to disk each time
    session_memory.append({"input": user_input})
    save_memory(session_memory)
    return generate_response(user_input)
# ---- Gradio Interface ----
def chat_interface(user_input):
    return advanced_agi_chat(user_input)

# ---- Gradio App Setup ----
with gr.Blocks() as app:
    gr.Markdown("# **Autistic Assistant vΓ Edition 2024 Ultra: Gertrude's Autistic Experience**")

    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(label="What will you say to Gertrude?", placeholder="Type something here...")
            submit_button = gr.Button("Send")
        with gr.Column(scale=1):
            chatbot = gr.Textbox(label="Gertrude's Response", interactive=False)  # Plain Textbox used for output

    # Custom styling for the UI
    gr.HTML("""
    <style>
    .gradio-container {
        background-color: #B3D9FF;
        padding: 20px;
        border-radius: 15px;
        font-family: 'Comic Sans MS';
    }
    .gradio-row {
        display: flex;
        justify-content: space-between;
    }
    </style>
    """)

    # Wire the button to the chat handler
    submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)

# Launch the Gradio app
app.launch()