Update app.py
app.py CHANGED
@@ -15,13 +15,6 @@ from transformers import TapasTokenizer, TapasForQuestionAnswering
 
 tf.get_logger().setLevel('ERROR')
 
-#try:
-  #print(sys.executable)
-  # subprocess.check_call(["/home/user/.local/lib/python3.8", "-m", "pip", "install", 'torch-scatter','-f', 'https://data.pyg.org/whl/torch-1.10.0+cpu.html'])
-#except Exception as e:
-  # print('Error..', str(e))
-
-
 model_name = 'google/tapas-base-finetuned-wtq'
 #model_name = "table-question-answering"
 #model = pipeline(model_name)
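The lines that actually build the tokenizer and model from `model_name` sit outside this hunk, so the sketch below is an assumption about how app.py presumably wires them up. It is inferred only from the `TapasTokenizer, TapasForQuestionAnswering` import visible in the hunk header and from the `tokenizer` / `model` objects used in the next hunk; the exact `from_pretrained` calls are not shown in this diff.

```python
# Assumed, not shown in this diff: construction of the objects used below.
from transformers import TapasTokenizer, TapasForQuestionAnswering

model_name = 'google/tapas-base-finetuned-wtq'
tokenizer = TapasTokenizer.from_pretrained(model_name)         # table-aware tokenizer
model = TapasForQuestionAnswering.from_pretrained(model_name)  # TAPAS fine-tuned on WikiTableQuestions
```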
@@ -59,8 +52,8 @@ def styling_specific_cell(x,tags,colors):
 
 if st.button('Predict Answers'):
     with st.spinner('It will take approx a minute'):
-
-        inputs = tokenizer(table=
+        table = data.astype(str)
+        inputs = tokenizer(table=table , queries=input_queries, padding='max_length',truncation=True, return_tensors="pt")
         outputs = model(**inputs)
         #outputs = model(table = data, query = queries)
         predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits.detach(), outputs.logits_aggregation.detach())
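The second hunk is the substantive fix: the previously incomplete `tokenizer(table=` call is replaced with a full call that passes the queries and tensor options, and the DataFrame is first cast with `data.astype(str)` because `TapasTokenizer` expects every table cell to be a string. Below is a minimal standalone sketch of that prediction flow with the same model, closely following the Transformers TAPAS documentation; the toy DataFrame and query are made up for illustration, whereas in app.py `data` and `input_queries` come from the user's table and questions.

```python
import pandas as pd
from transformers import TapasTokenizer, TapasForQuestionAnswering

model_name = 'google/tapas-base-finetuned-wtq'
tokenizer = TapasTokenizer.from_pretrained(model_name)
model = TapasForQuestionAnswering.from_pretrained(model_name)

# Toy inputs for illustration only; the app builds these from user input.
data = pd.DataFrame({'City': ['Paris', 'London', 'Berlin'],
                     'Population': [2140526, 8982000, 3645000]})
input_queries = ['Which city has the largest population?']

# TAPAS needs every cell as text, hence the astype(str) added in this commit.
table = data.astype(str)
inputs = tokenizer(table=table, queries=input_queries,
                   padding='max_length', truncation=True, return_tensors="pt")
outputs = model(**inputs)

# Map token-level logits back to table-cell coordinates and aggregation indices.
predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions(
    inputs, outputs.logits.detach(), outputs.logits_aggregation.detach()
)

# Read the predicted answer cells out of the (string) table.
for coordinates in predicted_answer_coordinates:
    print([table.iat[coordinate] for coordinate in coordinates])
```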