import pickle
import random
from collections import defaultdict

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gradio as gr
from datasets import load_dataset

# ---- Constants and Setup ----
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the SmolTalk dataset from Hugging Face. Each record stores a
# conversation under "messages" (a list of {"role", "content"} dicts),
# not a flat "text" field, so flatten the messages into plain strings.
# For a quick demo, a slice such as split="train[:1%]" keeps memory and
# table-building time reasonable.
dataset = load_dataset("HuggingFaceTB/smoltalk", "all", split="train")
corpus = [msg['content'] for entry in dataset for msg in entry['messages']]
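# Illustration (assumed record shape): a SmolTalk row looks roughly like
# {"messages": [{"role": "user", "content": "..."}, {"role": "assistant", ...}]},
# so the flatten above yields one corpus string per message.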

# ---- Advanced Text Generation Mechanisms ----
# N-gram model for text generation. The (n-1)-word context table is cached
# per n, since rebuilding it over the full corpus on every request would be
# prohibitively slow.
_ngram_tables = {}

def generate_ngram(corpus, n=3, length=50):
    if n not in _ngram_tables:
        table = defaultdict(list)
        for sentence in corpus:
            words = sentence.split()
            for i in range(len(words) - n + 1):
                table[tuple(words[i:i + n - 1])].append(words[i + n - 1])
        _ngram_tables[n] = table
    table = _ngram_tables[n]

    # Seed generation with the first n-1 words of a random, long-enough sentence
    start = random.choice(corpus).split()[:n - 1]
    while len(start) < n - 1:
        start = random.choice(corpus).split()[:n - 1]
    generated = list(start)

    for _ in range(length - (n - 1)):
        context = tuple(generated[-(n - 1):])
        if context not in table:
            break
        generated.append(random.choice(table[context]))

    return ' '.join(generated)
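# Illustration (toy corpus): with ["the cat sat on the mat"] and n=3, the
# cached table maps ('the', 'cat') -> ['sat'], ('cat', 'sat') -> ['on'],
# ..., ('on', 'the') -> ['mat'], and generation walks those contexts.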

# Markov chain model for text generation. Transition counts are cached for
# the same reason, and the next word is sampled in proportion to observed
# frequency rather than greedily, so generation does not lock onto the
# single most common bigram and repeat it forever.
_markov_table = None

def markov_chain(corpus, length=50):
    global _markov_table
    if _markov_table is None:
        _markov_table = defaultdict(lambda: defaultdict(int))
        for sentence in corpus:
            words = sentence.split()
            for i in range(len(words) - 1):
                _markov_table[words[i]][words[i + 1]] += 1

    # Seed with the first word of a random, non-empty sentence
    start_words = []
    while not start_words:
        start_words = random.choice(corpus).split()
    generated = [start_words[0]]

    for _ in range(length - 1):
        transitions = _markov_table.get(generated[-1])
        if not transitions:
            break
        next_word = random.choices(list(transitions), weights=list(transitions.values()))[0]
        generated.append(next_word)

    return ' '.join(generated)
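# Illustration (same toy corpus): "the" is followed by "cat" once and "mat"
# once, so from "the" the chain picks either successor with probability 1/2;
# every other word in that sentence has exactly one successor.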

# ---- Memory Management ----

def save_memory(memory, filename='chat_memory.pkl'):
    with open(filename, 'wb') as f:
        pickle.dump(memory, f)

def load_memory(filename='chat_memory.pkl'):
    try:
        with open(filename, 'rb') as f:
            return pickle.load(f)
    except (FileNotFoundError, EOFError):
        return []  # Return an empty list if the file is empty or doesn't exist

session_memory = load_memory()
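# Note: pickle deserialization can execute arbitrary code, so chat_memory.pkl
# should only ever be a file this app wrote itself, never untrusted input.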

# ---- Neural Networks ----
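# (Both models below are defined for experimentation; neither is wired into
# the chat loop yet.)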
class NNModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(NNModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

class PHIModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(PHIModel, self).__init__()
        self.phi = (1 + np.sqrt(5)) / 2  # Golden Ratio
        self.fc1 = nn.Linear(input_size, int(input_size * self.phi))
        self.fc2 = nn.Linear(int(input_size * self.phi), output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
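# Shape check (illustrative): PHIModel(16, 4) builds fc1 = Linear(16, 25),
# because int(16 * 1.618...) == 25, followed by fc2 = Linear(25, 4).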

# ---- Custom Chat Generation ----
def generate_response(user_input):
    # The reply is purely corpus-driven: user_input does not yet condition
    # generation. Both generators run so the user sees two styles side by side.
    ngram_response = generate_ngram(corpus, n=3, length=25)
    markov_response = markov_chain(corpus, length=25)

    # Combine both responses for diversity
    response = f"NG Response: {ngram_response}\n\nMarkov Response: {markov_response}"

    return response

# ---- Interactive Chat Function ----
def advanced_agi_chat(user_input):
    session_memory.append({"input": user_input})
    save_memory(session_memory)

    # Generate a response (currently independent of the input text)
    response = generate_response(user_input)
    return response

# ---- Gradio Interface ----
def chat_interface(user_input):
    response = advanced_agi_chat(user_input)
    return response

# ---- Gradio App Setup ----
with gr.Blocks() as app:
    gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")
    
    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(label="What will you say to Gertrude?", placeholder="Type something here...")
            submit_button = gr.Button("Send")
        with gr.Column(scale=1):
            chatbot = gr.Textbox(label="Gertrude's Response", interactive=False)  # Read-only output box

    # Adding custom styling for the UI
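    # (Injecting a <style> tag via gr.HTML is one approach; Gradio also
    # accepts a css= argument on gr.Blocks for page-level styling.)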
    gr.HTML("""
        <style>
            .gradio-container { 
                background-color: #B3D9FF; 
                padding: 20px; 
                border-radius: 15px; 
                font-family: 'Comic Sans MS'; 
            }
            .gradio-row { 
                display: flex;
                justify-content: space-between;
            }
        </style>
    """)

    # Setting the button click event
    submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)

# Launch the Gradio app
app.launch()
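
# Note: launch() blocks until the server stops; launch(share=True) creates a
# temporary public URL, which can be handy for quick demos outside of Spaces.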