sanarawal7 commited on
Commit
5639b64
·
1 Parent(s): 1a76cb5
Files changed (1) hide show
  1. app.py +24 -13
app.py CHANGED
@@ -1,38 +1,49 @@
1
  import gradio as gr
2
  import torch
3
- from transformers import AutoTokenizer, AutoModelForQuestionAnswering
4
 
5
  # Replace with your Hugging Face API token
6
- hf_api_token = "HF_API_KEY"
7
 
8
- # Load the model and tokenizer globally, not inside the function
9
- model_name = "allenai/Molmo-7B-D-0924"
10
- tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_api_token)
11
- model = AutoModelForQuestionAnswering.from_pretrained(model_name, trust_remote_code=True,use_auth_token=hf_api_token)
12
 
13
  def generate_questions(file_content):
 
 
 
 
 
14
  # Preprocess file content (assuming it's already text)
15
  try:
16
  text = file_content.decode("utf-8")
17
  except Exception as e:
18
  return f"Error decoding file: {str(e)}", ""
19
 
20
- # Generate questions and answers
21
- inputs = tokenizer(text, return_tensors="pt")
22
- outputs = model(**inputs)
23
- answers = tokenizer.decode(outputs.start_logits.argmax(), skip_special_tokens=True)
 
 
 
 
 
 
24
 
25
- # Extract questions and options (basic implementation)
26
  questions = []
27
  options = []
28
- for answer in answers.split("."):
29
  if answer.startswith("Q"):
30
  questions.append(answer.strip())
31
  else:
32
  options.append(answer.strip())
33
 
 
 
 
34
  return questions, options
35
 
 
36
  # Create Gradio interface
37
  question_box = gr.Textbox(label="Questions")
38
  option_box = gr.Textbox(label="Options")
@@ -44,4 +55,4 @@ iface = gr.Interface(
44
  title="Question and Option Generator"
45
  )
46
 
47
- iface.launch()
 
1
  import gradio as gr
2
  import torch
3
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
 
# Replace "YOUR_API_TOKEN" with a real Hugging Face access token before running.
# NOTE(review): hard-coding a secret here risks committing it to the repo —
# prefer reading it from an environment variable (e.g. os.environ["HF_TOKEN"]);
# confirm against the deployment setup.
hf_api_token = "YOUR_API_TOKEN"
 
 
 
 
 
8
 
9
def generate_questions(file_content):
    """Generate questions and options from an uploaded file's contents.

    Parameters
    ----------
    file_content : bytes
        Raw bytes of the uploaded file; must decode as UTF-8 text.
        (NOTE(review): assumes the Gradio input component delivers bytes —
        confirm against the Interface definition lower in the file.)

    Returns
    -------
    tuple
        ``(questions, options)`` as two lists of stripped sentence fragments
        on success, or ``(error_message_str, "")`` on failure — same contract
        as before.
    """
    # Load the model/tokenizer once and cache them on the function object.
    # The previous code re-downloaded and re-instantiated a 7B-parameter
    # checkpoint on EVERY request, which is prohibitively slow.
    if not hasattr(generate_questions, "_nlp"):
        model_name = "allenai/Molmo-7B-D-0924"
        # NOTE(review): Molmo is a multimodal causal-LM checkpoint; loading it
        # via AutoModelForSeq2SeqLM will likely raise — confirm the intended
        # model/auto-class pairing.  Also, `use_auth_token` is deprecated in
        # recent transformers releases in favor of `token=`.
        tokenizer = AutoTokenizer.from_pretrained(
            model_name, use_auth_token=hf_api_token
        )
        model = AutoModelForSeq2SeqLM.from_pretrained(
            model_name, use_auth_token=hf_api_token
        )
        generate_questions._nlp = (tokenizer, model)
    tokenizer, model = generate_questions._nlp

    # Decode the upload; surface a readable error instead of crashing the UI.
    try:
        text = file_content.decode("utf-8")
    except Exception as e:
        return f"Error decoding file: {str(e)}", ""

    # Create a prompt
    prompt = f"Here is a text: {text}. Please generate a set of questions based on the content."

    # Run generation; report model-side failures to the UI.
    inputs = tokenizer(prompt, return_tensors="pt")
    try:
        outputs = model.generate(**inputs)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"Error processing file: {str(e)}", ""

    # Split into sentence fragments.  Strip BEFORE the "Q" check: the original
    # tested the unstripped segment, so ". Q1 ..." (leading space after the
    # period) was misfiled under options.  Also drop empty fragments produced
    # by trailing/double periods, which used to be appended as "" options.
    questions = []
    options = []
    for segment in generated_text.split("."):
        segment = segment.strip()
        if not segment:
            continue
        if segment.startswith("Q"):
            questions.append(segment)
        else:
            options.append(segment)

    if not questions:
        return "No questions found in the uploaded text.", ""

    return questions, options
45
 
46
+
47
  # Create Gradio interface
48
  question_box = gr.Textbox(label="Questions")
49
  option_box = gr.Textbox(label="Options")
 
55
  title="Question and Option Generator"
56
  )
57
 
58
+ iface.launch()