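"""Gradio demo app for the Anhub online tutor chatbot.

The active code at the bottom of this file wraps OpenAI's ChatCompletion endpoint
(pre-1.0 `openai` interface) in a simple gradio.Interface. An older Gradio app that
computed the mean Age of Citation (mAoC) of a paper via the Semantic Scholar Graph
API is kept below as commented-out code for reference.
"""
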
# import gradio as gr
# import json
# import os
# import sys
# import csv
# import requests
# import seaborn as sns
# import pandas as pd
# import concurrent.futures
# from tqdm import tqdm
# import shutil
# import numpy as np
# from matplotlib import pyplot as plt
# import pickle


# # Read list to memory
# def read_list():
#     # the pickle must be opened in binary mode for reading
#     with open('mean_aoc_all_papers.pkl', 'rb') as fp:
#         n_list = pickle.load(fp)
#         return n_list

# mean_citation_list = read_list()

# def generate_plot_maoc(input_maoc):
#   sns.set(font_scale = 8)
#   sns.set(rc={'figure.figsize':(10,6)})
#   sns.set_style(style='whitegrid')

#   ax = sns.histplot(mean_citation_list, bins=100, kde=True, color='skyblue')
#   kdeline = ax.lines[0]
#   xs = kdeline.get_xdata()
#   ys = kdeline.get_ydata()

#   interpolated_y_maoc = np.interp(input_maoc, kdeline.get_xdata(), kdeline.get_ydata())
#   ax.scatter(input_maoc, interpolated_y_maoc,c='r', marker='*',linewidths=5, zorder=2)
#   ax.vlines(input_maoc, 0, interpolated_y_maoc, color='tomato', ls='--', lw=2)
#   epsilon = 0.3
#   ax.text(input_maoc + epsilon, interpolated_y_maoc + epsilon, 'Your paper', {'color': '#DC143C', 'fontsize': 13})
#   ax.set_xlabel("mean Age of Citation(mAoC)",fontsize=15)
#   ax.set_ylabel("Number of papers",fontsize=15)
#   ax.tick_params(axis='both', which='major', labelsize=12)
#   return plt

# # send a request to the Semantic Scholar Graph API
# def request_to_respose(request_url):
#   # the API key is read from the environment (variable name S2_API_KEY is arbitrary) rather than hard-coded
#   request_response = requests.get(request_url, headers={'x-api-key': os.environ.get('S2_API_KEY', '')})
#   return request_response

# def return_clear():
#   return None, None, None, None, None


# def compute_output(ssid_paper_id):
#   output_num_ref = 0
#   output_maoc = 0
#   oldest_paper_list = ""
#   plot_maoc = None  # initialised so the final return works even if the API request fails

#   request_url = f'https://api.semanticscholar.org/graph/v1/paper/{ssid_paper_id}?fields=references,title,venue,year'
#   r = request_to_respose(request_url)
#   if r.status_code == 200: # if successful request
#     s2_ref_paper_keys = [reference_paper_tuple['paperId'] for reference_paper_tuple in r.json()['references']]
#     filtered_s2_ref_paper_keys = [s2_ref_paper_key for s2_ref_paper_key in s2_ref_paper_keys if s2_ref_paper_key is not None]
#     total_references = len(s2_ref_paper_keys)
#     none_references = (len(s2_ref_paper_keys) - len(filtered_s2_ref_paper_keys))
#     s2_ref_paper_keys = filtered_s2_ref_paper_keys

#     print(r.json())

#     s2_paper_key, title, venue, year = r.json()['paperId'], r.json()['title'], r.json()['venue'], r.json()['year']
#     reference_year_list = []
#     reference_title_list = []
#     for ref_paper_key in s2_ref_paper_keys:
#       request_url_ref = f'https://api.semanticscholar.org/graph/v1/paper/{ref_paper_key}?fields=references,title,venue,year'
#       r_ref = request_to_respose(request_url_ref)
#       if r_ref.status_code == 200:
#         s2_paper_key_ref, title_ref, venue_ref, year_ref = r_ref.json()['paperId'], r_ref.json()['title'], r_ref.json()['venue'], r_ref.json()['year']
#         reference_year_list.append(year_ref)
#         reference_title_list.append(title_ref)
    
#     print(f'Number of references for which we got the year = {len(reference_year_list)}')
#     output_num_ref = len(reference_year_list)
#     aoc_list = [year - year_ref for year_ref in reference_year_list]
#     output_maoc = sum(aoc_list)/len(aoc_list)

#     sorted_ref_title_list = [x for _,x in sorted(zip(reference_year_list,reference_title_list))]
#     sorted_ref_year_list = [x for x,_ in sorted(zip(reference_year_list,reference_title_list))]
#     text = ""
#     sorted_ref_title_list = sorted_ref_title_list[:min(len(sorted_ref_title_list), 5)]
#     sorted_ref_year_list = sorted_ref_year_list[:min(len(sorted_ref_year_list), 5)]
#     for i in range(len(sorted_ref_year_list)):
#       text += '[' + str(sorted_ref_year_list[i]) + ']' + " Title: " + sorted_ref_title_list[i] + '\n'
    
#     oldest_paper_list = text
#     plot_maoc = generate_plot_maoc(output_maoc)
#     print(plot_maoc)
  
#   return output_num_ref, output_maoc, oldest_paper_list, gr.update(value=plot_maoc)


# with gr.Blocks() as demo:
#   ss_paper_id = gr.Textbox(label='Semantic Scholar ID',placeholder="Enter the Semantic Scholar ID here and press enter...", lines=1)
#   submit_btn = gr.Button("Generate")
#   with gr.Row():
#     num_ref = gr.Textbox(label="Number of references")
#     mAoc = gr.Textbox(label="Mean AoC")
#   with gr.Row():
#     oldest_paper_list = gr.Textbox(label="Top 5 oldest papers cited:",lines=5)
#   with gr.Row():
#     mAocPlot = gr.Plot(label="Plot")
  
#   clear_btn = gr.Button("Clear")
  
#   submit_btn.click(fn = compute_output, inputs = [ss_paper_id], outputs = [num_ref, mAoc, oldest_paper_list, mAocPlot])
#   # clear_btn.click(lambda: None, None, None, queue=False)
#   clear_btn.click(fn = return_clear, inputs=[], outputs=[ss_paper_id, num_ref, mAoc, oldest_paper_list, mAocPlot])

# demo.launch()

import os

import openai
import gradio

# Read the API key from the environment instead of hard-coding a secret in the source.
openai.api_key = os.environ.get("OPENAI_API_KEY")

# Conversation history shared by every request; the system message sets the tutor persona.
messages = [{"role": "system", "content": "Anhub Online Education Tutor for Any Subjects:"}]

def CustomChatGPT(user_input):
    # Append the user's message, query the ChatCompletion endpoint (pre-1.0 openai API),
    # and store the assistant's reply so later turns keep the full conversation context.
    # Note: `messages` is a module-level list, so the history grows without bound and is
    # shared across all users of the demo.
    messages.append({"role": "user", "content": user_input})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    ChatGPT_reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": ChatGPT_reply})
    return ChatGPT_reply
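
# Hypothetical direct call for quick manual testing, bypassing the Gradio UI:
#   print(CustomChatGPT("Explain photosynthesis in one sentence."))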

demo = gradio.Interface(
    fn=CustomChatGPT,
    inputs="text",
    outputs="text",
    title="Anhub Metaverse Education Online Tutor for Any Subjects and any Languages @ 24 x 7:",
)

demo.launch()
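
# A minimal sketch of how this script might be run locally (assumptions: it is saved
# as app.py, the pre-1.0 `openai` package and `gradio` are installed, and the API key
# is exported in the shell):
#
#   pip install "openai<1.0" gradio
#   export OPENAI_API_KEY="sk-..."
#   python app.py        # Gradio serves the UI at http://127.0.0.1:7860 by default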