jarif committed on
Commit
7c56e78
·
verified ·
1 Parent(s): 7788b02

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -11,7 +11,7 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
11
  logging.basicConfig(level=logging.DEBUG)
12
 
13
  def load_vector_store():
14
- # Directory to load the vector data
15
  persist_directory = "./chroma_db"
16
  if not os.path.exists(persist_directory):
17
  logging.error(f"The directory '{persist_directory}' does not exist. Please run the ingestion script.")
@@ -30,10 +30,10 @@ def load_llm():
30
  'text2text-generation',
31
  model=model,
32
  tokenizer=tokenizer,
33
- max_length=256,
34
  do_sample=True,
35
- temperature=0.3,
36
- top_p=0.95
37
  )
38
  return HuggingFacePipeline(pipeline=pipe)
39
 
@@ -63,7 +63,7 @@ def main():
63
  with st.expander("About the App"):
64
  st.markdown(
65
  """
66
- This is a Generative AI powered Question and Answering app that responds to questions about your PDF File.
67
  """
68
  )
69
  question = st.text_area("Enter your Question")
 
11
  logging.basicConfig(level=logging.DEBUG)
12
 
13
  def load_vector_store():
14
+ # Ensure the directory exists
15
  persist_directory = "./chroma_db"
16
  if not os.path.exists(persist_directory):
17
  logging.error(f"The directory '{persist_directory}' does not exist. Please run the ingestion script.")
 
30
  'text2text-generation',
31
  model=model,
32
  tokenizer=tokenizer,
33
+ max_length=1024, # Increase max_length to allow longer output
34
  do_sample=True,
35
+ temperature=0, # Adjust temperature to control randomness
36
+ top_p=0.9 # Adjust top_p to control diversity of the output
37
  )
38
  return HuggingFacePipeline(pipeline=pipe)
39
 
 
63
  with st.expander("About the App"):
64
  st.markdown(
65
  """
66
+ This is a Generative AI-powered Question and Answering app that responds to questions about your PDF File.
67
  """
68
  )
69
  question = st.text_area("Enter your Question")