TejAndrewsACC committed on
Commit
b9eb5b5
·
verified ·
1 Parent(s): fe6a8fb

Update App.py

Files changed (1)
  1. App.py +150 -0
App.py CHANGED
@@ -0,0 +1,150 @@
+ import torch
+ import torch.nn as nn
+ import random
+ import pickle
+ import gradio as gr
+ import numpy as np
+ import torch.nn.functional as F
+ import string
+
+ # Select the device up front so the classes and helpers below can reference it
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ # ---- Memory Management ----
+ session_memory = []
+
+ def save_memory(memory, filename='chat_memory.pkl'):
+     with open(filename, 'wb') as f:
+         pickle.dump(memory, f)
+
+ def load_memory(filename='chat_memory.pkl'):
+     try:
+         with open(filename, 'rb') as f:
+             return pickle.load(f)
+     except (FileNotFoundError, EOFError):
+         return []  # Return an empty list if the file is empty or doesn't exist
+
+ session_memory = load_memory()
+
+ # ---- Character-Level RNN Model ----
+ class CharRNN(nn.Module):
+     def __init__(self, input_size, hidden_size, output_size):
+         super(CharRNN, self).__init__()
+         self.hidden_size = hidden_size
+         self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
+         self.fc = nn.Linear(hidden_size, output_size)
+
+     def forward(self, x, hidden):
+         # x: one-hot floats of shape (batch, seq_len, input_size)
+         out, hidden = self.rnn(x, hidden)
+         out = self.fc(out[:, -1, :])  # Use last time-step
+         return out, hidden
+
+     def init_hidden(self, batch_size):
+         # nn.RNN expects a hidden state of shape (num_layers, batch, hidden_size)
+         return torch.zeros(1, batch_size, self.hidden_size).to(device)
+
+ # ---- PHI Model ----
+ # Defined for experimentation; not wired into the chat pipeline below
+ class PHIModel(nn.Module):
+     def __init__(self, input_size, output_size):
+         super(PHIModel, self).__init__()
+         self.phi = (1 + np.sqrt(5)) / 2  # Golden Ratio
+         self.fc1 = nn.Linear(input_size, int(input_size * self.phi))
+         self.fc2 = nn.Linear(int(input_size * self.phi), output_size)
+
+     def forward(self, x):
+         x = F.relu(self.fc1(x))
+         x = self.fc2(x)
+         return x
+
+ # ---- Helper Functions ----
+ # Generate a sequence of characters as a response to the input
+ def generate_response_rnn(model, input_text, char_to_idx, idx_to_char, max_len=100):
+     vocab_size = len(char_to_idx)
+
+     def encode(indices):
+         # nn.RNN needs float input of shape (batch, seq_len, input_size),
+         # so one-hot encode the character indices
+         return F.one_hot(torch.tensor(indices, dtype=torch.long), num_classes=vocab_size).float().unsqueeze(0).to(device)
+
+     # Convert input text to a tensor, skipping characters outside the vocabulary
+     indices = [char_to_idx[c] for c in input_text if c in char_to_idx]
+     if not indices:
+         indices = [0]  # Fall back to the first vocabulary character
+     input_tensor = encode(indices)
+
+     hidden = model.init_hidden(1)
+     output_str = input_text
+
+     # Generate characters one at a time, feeding each prediction back in
+     for _ in range(max_len):
+         output, hidden = model(input_tensor, hidden)
+         prob = F.softmax(output, dim=1)
+         predicted_idx = torch.multinomial(prob, 1).item()
+         predicted_char = idx_to_char[predicted_idx]
+
+         output_str += predicted_char
+         input_tensor = encode([predicted_idx])
+
+     return output_str
+
+ # ---- Training Data ----
+ def prepare_data(text):
+     # Create a set of all unique characters and map them to indices
+     chars = sorted(list(set(text)))
+     char_to_idx = {char: idx for idx, char in enumerate(chars)}
+     idx_to_char = {idx: char for idx, char in enumerate(chars)}
+
+     return char_to_idx, idx_to_char
+
+ # ---- Chat Interface ----
+ def simple_chat(user_input):
+     session_memory.append({"input": user_input})
+     save_memory(session_memory)
+
+     # Training data (for simplicity, using a sample text)
+     sample_text = "hello there, how can I assist you today?"
+     char_to_idx, idx_to_char = prepare_data(sample_text)
+
+     # Initialize the RNN model with appropriate input/output sizes
+     input_size = len(char_to_idx)
+     hidden_size = 128  # Arbitrary size for hidden layer
+     output_size = len(char_to_idx)
+
+     # Create the RNN model
+     model = CharRNN(input_size, hidden_size, output_size).to(device)
+
+     # Load pre-trained weights if a checkpoint exists; in a real case these
+     # come from a trained model. Otherwise fall back to the untrained
+     # initialization so the app still runs.
+     try:
+         model.load_state_dict(torch.load('char_rnn_model.pth', map_location=device))
+     except FileNotFoundError:
+         pass  # No checkpoint yet; the model will emit random characters
+     model.eval()
+
+     # Generate a response using the model
+     response = generate_response_rnn(model, user_input, char_to_idx, idx_to_char)
+
+     return response
+
+ # ---- Gradio Interface ----
+ def chat_interface(user_input):
+     response = simple_chat(user_input)
+     return response
+
+ # ---- Gradio App Setup ----
+ with gr.Blocks() as app:
+     gr.Markdown("# **Chatbot with Neural Network and Text Generation**")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             user_input = gr.Textbox(label="What will you say?", placeholder="Type something here...")
+             submit_button = gr.Button("Send")
+         with gr.Column(scale=1):
+             chatbot = gr.Textbox(label="Chatbot Response", interactive=False)  # Read-only Textbox used for output
+
+     # Adding custom styling for the UI
+     gr.HTML("""
+     <style>
+         .gradio-container {
+             background-color: #F0F8FF;
+             padding: 20px;
+             border-radius: 15px;
+             font-family: 'Arial';
+         }
+         .gradio-row {
+             display: flex;
+             justify-content: space-between;
+         }
+     </style>
+     """)
+
+     # Setting the button click event
+     submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)
+
+ # Launch the Gradio app
+ app.launch()
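
App.py tries to load a checkpoint from char_rnn_model.pth, but the commit does not include the script that produces it. Below is a minimal training sketch, assuming the same CharRNN architecture, hidden size (128), and sample text as App.py; the optimizer, learning rate, and epoch count are illustrative choices, not part of the commit.

# train_char_rnn.py -- hypothetical companion script, not part of this commit.
# Trains a next-character model on App.py's sample text and saves the
# checkpoint that App.py loads.
import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class CharRNN(nn.Module):  # must mirror the architecture defined in App.py
    def __init__(self, input_size, hidden_size, output_size):
        super(CharRNN, self).__init__()
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        out, hidden = self.rnn(x, hidden)
        return self.fc(out[:, -1, :]), hidden

text = "hello there, how can I assist you today?"
chars = sorted(set(text))
char_to_idx = {c: i for i, c in enumerate(chars)}
indices = [char_to_idx[c] for c in text]
vocab_size = len(chars)

model = CharRNN(vocab_size, 128, vocab_size).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

# Next-character objective: each prefix of the text predicts the character after it
for epoch in range(200):  # epoch count is illustrative
    for i in range(1, len(indices)):
        x = F.one_hot(torch.tensor(indices[:i]), num_classes=vocab_size).float().unsqueeze(0).to(device)
        y = torch.tensor([indices[i]], device=device)
        hidden = torch.zeros(1, 1, model.hidden_size, device=device)
        output, _ = model(x, hidden)
        loss = criterion(output, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

torch.save(model.state_dict(), 'char_rnn_model.pth')

Because prepare_data builds its vocabulary with sorted(set(text)), training on the identical sample text keeps the index mapping and layer shapes consistent with what simple_chat reconstructs at inference time.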