Update app.py
app.py CHANGED
@@ -71,12 +71,15 @@ from huggingface_hub import HfFileSystem
 
 # dataset = load_dataset('Seetha/Visualization', streaming=True)
 # df = pd.DataFrame.from_dict(dataset['train'])
-DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/Visualization"
-DATA_FILENAME = "level2.json"
+# DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/Visualization"
+# DATA_FILENAME = "level2.json"
 #DATA_FILE = os.path.join("data", DATA_FILENAME)
+DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_files"
+DATA_FILENAME = "detailedResults.json"
+DATA_FILENAME1 = "level2.json"
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
-st.write("is none?", HF_TOKEN is None)
+#st.write("is none?", HF_TOKEN is None)
 
 def main():
 
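The first hunk retargets the constants from the old Seetha/Visualization dataset to Seetha/visual_files, splits the filenames into DATA_FILENAME (detailedResults.json) and DATA_FILENAME1 (level2.json), and comments out the token debug line. A minimal sketch of the resulting configuration block, assuming the Space exposes the token as the HF_TOKEN secret; the fail-fast guard at the end is an illustrative addition, not part of the commit:

    import os
    import streamlit as st

    DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_files"
    DATA_FILENAME = "detailedResults.json"   # aggregated prediction results
    DATA_FILENAME1 = "level2.json"           # visualization payload

    HF_TOKEN = os.environ.get("HF_TOKEN")
    if HF_TOKEN is None:
        # Hypothetical guard: stop early instead of printing the token state.
        st.error("HF_TOKEN secret is not set; dataset writes will fail.")
        st.stop()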
@@ -481,28 +484,12 @@ def main():
             'value': int(df_tab.loc[row, col])
         })
 
-        HfApi().delete_file(path_in_repo =
+        HfApi().delete_file(path_in_repo = DATA_FILENAME1 ,repo_id = 'Seetha/visual_files',token= HF_TOKEN,repo_type='dataset')
         st.write('file-deleted')
         fs = HfFileSystem(token=HF_TOKEN)
-        with fs.open('datasets/Seetha/
+        with fs.open('datasets/Seetha/visual_files/level2.json', 'w') as f:
             json.dump(json_data, f)
-
-        # level2_df = pd.read_json(dat)
-        # level2_df = pd.DataFrame.from_dict(dat, orient='index')
-        # level2_df.reset_index(level=0,inplace=True)
-        #st.write(level2_df)
-        # with open('level2.json','r+') as fi:
-        #     data = fi.read()
-        #     #st.write(data)
-        #     fi.seek(0)
-        #     fi.write(dat)
-        #     fi.truncate()
-
-        #updated_dataset = dataset.map(lambda example: {'new_value': level2_df['value'], 'new_source':level2_df['source'], 'new_target': level2_df['target']},remove_columns=['value','source','target'])
-
-        # st.write(updated_dataset)
-        #updated_dataset.push_to_hub('Seetha/visual', token=os.environ.get('HF_TOKEN'))
-        # updated_dataset.push_to_hub('Seetha/Visualization')
+
         df_final1.to_csv('predictions.csv')
         csv_file = "predictions.csv"
         json_file = "detailedResults.json"
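The second hunk completes the previously truncated delete/write pair against the new repo (HfApi().delete_file removes level2.json from Seetha/visual_files, then HfFileSystem writes the fresh payload) and drops a long block of commented-out push_to_hub experiments. A hedged sketch of the same pattern, assuming HF_TOKEN is set and json_data is a plain Python object rather than a pre-dumped string; writing through HfFileSystem already overwrites the file in a single commit, so the delete is defensive rather than required:

    import json
    import os

    from huggingface_hub import HfApi, HfFileSystem

    HF_TOKEN = os.environ.get("HF_TOKEN")
    json_data = {"nodes": [], "links": []}  # placeholder for the real payload

    # Remove the stale copy, then commit the new one. The delete is optional:
    # opening the path in "w" mode replaces the file contents on its own.
    HfApi().delete_file(path_in_repo="level2.json",
                        repo_id="Seetha/visual_files",
                        token=HF_TOKEN, repo_type="dataset")
    fs = HfFileSystem(token=HF_TOKEN)
    with fs.open("datasets/Seetha/visual_files/level2.json", "w") as f:
        json.dump(json_data, f)

One caveat on the committed line json.dump(json_data, f): if json_data is already a JSON string (as it is where the same name is built via json.dumps later in the file), json.dump would double-encode it, and f.write(json_data) would be the safer call.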
@@ -515,15 +502,13 @@ def main():
         data_list = []
         for row in csv_data:
             data_list.append(dict(row))
-
+
         # # Convert the list of dictionaries to JSON
         json_data = json.dumps(data_list)
 
-
-
-
-        # #fi.seek(0)
-        with open('detailedResults.json','w') as fi:
+        HfApi().delete_file(path_in_repo = DATA_FILENAME ,repo_id = 'Seetha/visual_files',token= HF_TOKEN,repo_type='dataset')
+        st.write('file2-deleted')
+        with fs.open('datasets/Seetha/visual_files/detailedResults.json','w') as fi:
             #data = json.load(fi)
             fi.write(json_data)
 
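The third hunk applies the same delete-then-write pattern to detailedResults.json, swapping the local open() for the HfFileSystem handle so the JSON lands in the dataset repo rather than on the Space's ephemeral disk. A sketch of the full CSV-to-JSON step under the same assumptions (predictions.csv exists; the repo and token names match the hunks above):

    import csv
    import json
    import os

    from huggingface_hub import HfApi, HfFileSystem

    HF_TOKEN = os.environ.get("HF_TOKEN")
    fs = HfFileSystem(token=HF_TOKEN)

    # Read the predictions CSV into a list of dicts, one per row.
    with open("predictions.csv", newline="") as csv_fh:
        data_list = [dict(row) for row in csv.DictReader(csv_fh)]

    json_data = json.dumps(data_list)  # a JSON string from here on

    HfApi().delete_file(path_in_repo="detailedResults.json",
                        repo_id="Seetha/visual_files",
                        token=HF_TOKEN, repo_type="dataset")
    with fs.open("datasets/Seetha/visual_files/detailedResults.json", "w") as fi:
        fi.write(json_data)  # write(), not json.dump(): the value is already a string

Here the committed code gets the encoding right: json_data holds the string from json.dumps, so fi.write() stores it verbatim, consistent with the caveat noted for the previous hunk.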