sani903 committed on
Commit afbf93f · 1 Parent(s): 990522a

Upload app.py

Files changed (1)
  1. app.py +266 -0
app.py ADDED
@@ -0,0 +1,266 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import gradio as gr
+ import pandas as pd
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import init, MarginRankingLoss
+ from torch.optim import Adam
+ from distutils.version import LooseVersion
+ from torch.utils.data import Dataset, DataLoader
+ from torch.autograd import Variable
+ import math
+ from transformers import AutoConfig, AutoModel, AutoTokenizer
+ import nltk
+ import re
+ import torch.optim as optim
+ from tqdm import tqdm
+ from transformers import AutoModelForMaskedLM
+ import torch.nn.functional as F
+ import random
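+ # Note: only gradio, torch, random, and the transformers classes are used in
+ # this file; the remaining imports appear to be leftovers from the training
+ # notebook.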
+
+
+ # In[2]:
+
+
+ # eng_dict = []
+ # with open('eng_dict.txt', 'r') as file:
+ #     # Read each line from the file and append it to the list
+ #     for line in file:
+ #         # Remove leading and trailing whitespace (e.g., newline characters)
+ #         cleaned_line = line.strip()
+ #         eng_dict.append(cleaned_line)
+
+
+ # In[14]:
+
+
+ def greet(X, ny):
+     global eng_dict
+     ny = int(ny)
+     if ny == 0:
+         # No token count given: sample one from the empirical cumulative
+         # distribution of sub-token counts observed in the training data.
+         rand_no = random.random()
+         tok_map = {2: 0.4363429005892416,
+                    1: 0.6672580202327398,
+                    4: 0.7476060740459144,
+                    3: 0.9618703668504087,
+                    6: 0.9701028532809564,
+                    7: 0.9729244545819342,
+                    8: 0.9739508754144756,
+                    5: 0.9994508859743607,
+                    9: 0.9997507867114407,
+                    10: 0.9999112969650892,
+                    11: 0.9999788802297832,
+                    0: 0.9999831041838266,
+                    12: 0.9999873281378701,
+                    22: 0.9999957760459568,
+                    14: 1.0000000000000002}
+         for key in tok_map.keys():
+             if rand_no < tok_map[key]:
+                 num_sub_tokens_label = key
+                 break
+     else:
+         num_sub_tokens_label = ny
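+     # For example, a draw of rand_no = 0.5 is not below 0.436 (key 2) but is
+     # below 0.667 (key 1), so the sampled name length would be one sub-token.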
+     # Load the base checkpoint, then the fine-tuned weights shipped with the Space.
+     tokenizer = AutoTokenizer.from_pretrained("microsoft/graphcodebert-base")
+     model = AutoModelForMaskedLM.from_pretrained("microsoft/graphcodebert-base")
+     model.load_state_dict(torch.load('model_26_2'))
+     model.eval()
+     # Expand each "[MASK]" placeholder into num_sub_tokens_label consecutive mask tokens.
+     X_init = X
+     X_init = X_init.replace("[MASK]", " [MASK] ")
+     X_init = X_init.replace("[MASK]", " ".join([tokenizer.mask_token] * num_sub_tokens_label))
+     tokens = tokenizer.encode_plus(X_init, add_special_tokens=False, return_tensors='pt')
+     # Split long inputs into chunks of 510 tokens so that, with the two special
+     # tokens added below, each chunk fits the 512-token model limit.
+     input_id_chunki = tokens['input_ids'][0].split(510)
+     input_id_chunks = []
+     mask_chunks = []
+     mask_chunki = tokens['attention_mask'][0].split(510)
+     for tensor in input_id_chunki:
+         input_id_chunks.append(tensor)
+     for tensor in mask_chunki:
+         mask_chunks.append(tensor)
+     # Special-token ids come from the tokenizer itself (GraphCodeBERT uses the
+     # RoBERTa vocabulary, so these are not the BERT ids 101/102).
+     xi = torch.full((1,), fill_value=tokenizer.cls_token_id)
+     yi = torch.full((1,), fill_value=1)
+     zi = torch.full((1,), fill_value=tokenizer.sep_token_id)
+     for r in range(len(input_id_chunks)):
+         input_id_chunks[r] = torch.cat([xi, input_id_chunks[r]], dim=-1)
+         input_id_chunks[r] = torch.cat([input_id_chunks[r], zi], dim=-1)
+         mask_chunks[r] = torch.cat([yi, mask_chunks[r]], dim=-1)
+         mask_chunks[r] = torch.cat([mask_chunks[r], yi], dim=-1)
+     # Pad every chunk to 512 positions; padded positions get attention 0, so
+     # the padded input-id value itself is never attended to.
+     di = torch.full((1,), fill_value=0)
+     for i in range(len(input_id_chunks)):
+         pad_len = 512 - input_id_chunks[i].shape[0]
+         if pad_len > 0:
+             for p in range(pad_len):
+                 input_id_chunks[i] = torch.cat([input_id_chunks[i], di], dim=-1)
+                 mask_chunks[i] = torch.cat([mask_chunks[i], di], dim=-1)
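+     # e.g. a 900-token class is split into chunks of 510 and 390 tokens; after
+     # adding the two special tokens they have 512 and 392 positions, and the
+     # second chunk is padded back up to 512.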
+     vb = torch.ones_like(input_id_chunks[0])
+     fg = torch.zeros_like(input_id_chunks[0])
+     # For every chunk, record the index of the first mask token of each
+     # contiguous run of mask tokens (one entry per masked identifier occurrence).
+     maski = []
+     for l in range(len(input_id_chunks)):
+         masked_pos = []
+         for i in range(len(input_id_chunks[l])):
+             if input_id_chunks[l][i] == tokenizer.mask_token_id:
+                 if i != 0 and input_id_chunks[l][i-1] == tokenizer.mask_token_id:
+                     continue
+                 masked_pos.append(i)
+         maski.append(masked_pos)
+     input_ids = torch.stack(input_id_chunks)
+     att_mask = torch.stack(mask_chunks)
+     with torch.no_grad():
+         outputs = model(input_ids, attention_mask=att_mask)
+     # outputs[0] holds the masked-LM logits over the vocabulary for every position.
+     last_hidden_state = outputs[0].squeeze()
+     l_o_l_sa = []
+     sum_state = []
+     for t in range(num_sub_tokens_label):
+         c = []
+         l_o_l_sa.append(c)
+     # Collect, for each of the t mask slots, the logits at that slot for every
+     # masked occurrence (handling runs that straddle a chunk boundary).
+     if len(maski) == 1:
+         masked_pos = maski[0]
+         for k in masked_pos:
+             for t in range(num_sub_tokens_label):
+                 l_o_l_sa[t].append(last_hidden_state[k+t])
+     else:
+         for p in range(len(maski)):
+             masked_pos = maski[p]
+             for k in masked_pos:
+                 for t in range(num_sub_tokens_label):
+                     if (k+t) >= len(last_hidden_state[p]):
+                         l_o_l_sa[t].append(last_hidden_state[p+1][k+t-len(last_hidden_state[p])])
+                         continue
+                     l_o_l_sa[t].append(last_hidden_state[p][k+t])
+     # Average the logits of each mask slot over all occurrences of the masked identifier.
+     for t in range(num_sub_tokens_label):
+         sum_state.append(l_o_l_sa[t][0])
+     for i in range(len(l_o_l_sa[0])):
+         if i == 0:
+             continue
+         for t in range(num_sub_tokens_label):
+             sum_state[t] = sum_state[t] + l_o_l_sa[t][i]
+     yip = len(l_o_l_sa[0])
+     # qw = []
+     er = ""
+     # For every slot, take the top-5 candidate tokens and keep the first purely
+     # alphabetic one; the kept tokens are concatenated into the suggested name.
+     for t in range(num_sub_tokens_label):
+         sum_state[t] /= yip
+         idx = torch.topk(sum_state[t], k=5, dim=0)[1]
+         wor = [tokenizer.decode(i.item()).strip() for i in idx]
+         for kl in wor:
+             if all(char.isalpha() for char in kl):
+                 # qw.append(kl.lower())
+                 er += kl
+                 break
+     # print(er)
+     # astr = ""
+     # for j in range(len(qw)):
+     #     mock = ""
+     #     mock += qw[j]
+     #     if (j+2) < len(qw) and ((mock+qw[j+1]+qw[j+2]) in eng_dict):
+     #         mock += qw[j+1]
+     #         mock += qw[j+2]
+     #         j = j+2
+     #     elif (j+1) < len(qw) and ((mock+qw[j+1]) in eng_dict):
+     #         mock += qw[j+1]
+     #         j = j+1
+     #     if len(astr) == 0:
+     #         astr += mock
+     #     else:
+     #         astr += mock.capitalize()
+     return er
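+ # A quick sanity check one could run from a Python shell (hypothetical snippet;
+ # it assumes the fine-tuned checkpoint 'model_26_2' sits next to app.py):
+ #
+ #     snippet = 'int [MASK] = br.readLine().length();'
+ #     print(greet(snippet, "2"))  # concatenation of the top sub-token per mask slot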
+ title = "Rename a variable in a Java class"
+ description = """This demo uses a GraphCodeBERT model fine-tuned to suggest higher-quality variable names for Java classes. Long classes are
+ handled by chunking the input. Replace every occurrence of a variable name with "[MASK]" to get a suggested identifier.
+ """
+ ex = ["""import java.io.*;
+ public class x {
+     public static void main(String[] args) {
+         String f = "file.txt";
+         BufferedReader [MASK] = null;
+         String l;
+         try {
+             [MASK] = new BufferedReader(new FileReader(f));
+             while ((l = [MASK].readLine()) != null) {
+                 System.out.println(l);
+             }
+         } catch (IOException e) {
+             e.printStackTrace();
+         } finally {
+             try {
+                 if ([MASK] != null) [MASK].close();
+             } catch (IOException ex) {
+                 ex.printStackTrace();
+             }
+         }
+     }
+ }""", """import java.net.*;
+ import java.io.*;
+
+ public class s {
+     public static void main(String[] args) throws IOException {
+         ServerSocket [MASK] = new ServerSocket(8000);
+         try {
+             Socket s = [MASK].accept();
+             PrintWriter pw = new PrintWriter(s.getOutputStream(), true);
+             BufferedReader br = new BufferedReader(new InputStreamReader(s.getInputStream()));
+             String i;
+             while ((i = br.readLine()) != null) {
+                 pw.println(i);
+             }
+         } finally {
+             if ([MASK] != null) [MASK].close();
+         }
+     }
+ }""", """import java.io.*;
+ import java.util.*;
+
+ public class y {
+     public static void main(String[] args) {
+         String [MASK] = "data.csv";
+         String l = "";
+         String cvsSplitBy = ",";
+         try (BufferedReader br = new BufferedReader(new FileReader([MASK]))) {
+             while ((l = br.readLine()) != null) {
+                 String[] z = l.split(cvsSplitBy);
+                 System.out.println("Values [field-1= " + z[0] + " , field-2=" + z[1] + "]");
+             }
+         } catch (IOException e) {
+             e.printStackTrace();
+         }
+     }
+ }"""]
+ # Instantiate the code-input Textbox. Note: title, description, and examples
+ # are gr.Interface arguments, not gr.Textbox arguments, so they are passed
+ # below; each example pairs a snippet with "0" (randomly sampled token count).
+ textbox = gr.Textbox(label="Type Java code snippet:", placeholder="replace variable with [MASK]", lines=10)
+
+ gr.Interface(fn=greet, inputs=[
+     textbox,
+     gr.Textbox(type="text", label="Number of tokens in name:", placeholder="0 for randomly sampled number of tokens")
+ ], outputs="text", title=title, description=description,
+     examples=[[code, "0"] for code in ex]).launch()
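+ # Running `python app.py` starts the Gradio server via launch(); on a Hugging
+ # Face Space this file is executed automatically when the Space boots.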
+
+
+ # In[ ]:
+
+
+ # Leftover notebook cell: the Java snippet below duplicates the first entry of
+ # `ex` and is not valid Python, so it is kept only as a comment.
+ # import java.io.*;
+ # public class x {
+ #     public static void main(String[] args) {
+ #         String f = "file.txt";
+ #         BufferedReader [MASK] = null;
+ #         String l;
+ #         try {
+ #             [MASK] = new BufferedReader(new FileReader(f));
+ #             while ((l = [MASK].readLine()) != null) {
+ #                 System.out.println(l);
+ #             }
+ #         } catch (IOException e) {
+ #             e.printStackTrace();
+ #         } finally {
+ #             try {
+ #                 if ([MASK] != null) [MASK].close();
+ #             } catch (IOException ex) {
+ #                 ex.printStackTrace();
+ #             }
+ #         }
+ #     }
+ # }
+