Seetha committed on
Commit 8bb68eb · 1 Parent(s): 982d150

Update app.py

Files changed (1)
  1. app.py +20 -20
app.py CHANGED
@@ -164,34 +164,34 @@ def main():
  # st.write('causality extraction finished')
  # st.write("--- %s seconds ---" % (time.time() - start_time))

- # filename = 'Checkpoint-classification.sav'
- # loaded_model = pickle.load(open(filename, 'rb'))
- # loaded_vectorizer = pickle.load(open('vectorizefile_classification.pickle', 'rb'))
+ filename = 'Checkpoint-classification.sav'
+ loaded_model = pickle.load(open(filename, 'rb'))
+ loaded_vectorizer = pickle.load(open('vectorizefile_classification.pickle', 'rb'))

- # pipeline_test_output = loaded_vectorizer.transform(class_list)
- # predicted = loaded_model.predict(pipeline_test_output)
+ pipeline_test_output = loaded_vectorizer.transform(class_list)
+ predicted = loaded_model.predict(pipeline_test_output)

- tokenizer = Tokenizer(num_words=100000)
- tokenizer.fit_on_texts(class_list)
- word_index = tokenizer.word_index
+ # tokenizer = Tokenizer(num_words=100000)
+ # tokenizer.fit_on_texts(class_list)
+ # word_index = tokenizer.word_index
  # text_embedding = np.zeros((len(word_index) + 1, 300))
  # for word, i in word_index.items():
  #     text_embedding[i] = nlp(word).vector
- json_file = open('model.json', 'r')
- loaded_model_json = json_file.read()
- json_file.close()
- loaded_model = model_from_json(loaded_model_json)
- # load weights into new model
- loaded_model.load_weights("model.h5")
+ # json_file = open('model.json', 'r')
+ # loaded_model_json = json_file.read()
+ # json_file.close()
+ # loaded_model = model_from_json(loaded_model_json)
+ # # load weights into new model
+ # loaded_model.load_weights("model.h5")

- loss = tf.keras.losses.CategoricalCrossentropy() #from_logits=True
- loaded_model.compile(loss=loss,optimizer=tf.keras.optimizers.Adam(1e-4))
+ # loss = tf.keras.losses.CategoricalCrossentropy() #from_logits=True
+ # loaded_model.compile(loss=loss,optimizer=tf.keras.optimizers.Adam(1e-4))

- predictions = loaded_model.predict(pad_sequences(tokenizer.texts_to_sequences(class_list),maxlen=MAX_SEQUENCE_LENGTH))
- predicted = np.argmax(predictions,axis=1)
+ # predictions = loaded_model.predict(pad_sequences(tokenizer.texts_to_sequences(class_list),maxlen=MAX_SEQUENCE_LENGTH))
+ # predicted = np.argmax(predictions,axis=1)

- st.write(predictions)
- st.write(predicted)
+ # st.write(predictions)
+ # st.write(predicted)
  # st.write('stakeholder taxonomy finished')
  # st.write("--- %s seconds ---" % (time.time() - start_time))
  pred1 = predicted
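
For reference, the added lines switch classification from the commented-out Keras model (model.json / model.h5) to the pickled vectorizer and classifier. A minimal sketch of that new path, assuming the two pickled files hold a fitted scikit-learn text vectorizer and a trained classifier, and that class_list is a list of input strings (the placeholder inputs below are illustrative, not from the repo):

import pickle

class_list = ["example sentence one", "example sentence two"]  # placeholder inputs

# Context managers close the file handles that the inline pickle.load(open(...)) calls in the diff leave open.
with open('Checkpoint-classification.sav', 'rb') as f:
    loaded_model = pickle.load(f)
with open('vectorizefile_classification.pickle', 'rb') as f:
    loaded_vectorizer = pickle.load(f)

pipeline_test_output = loaded_vectorizer.transform(class_list)  # feature matrix, one row per string
predicted = loaded_model.predict(pipeline_test_output)          # one predicted label per input string
print(predicted)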