Darka001 committed
Commit 224fd41 · verified · 1 Parent(s): 5f771ff

Update app.py

Files changed (1): app.py +5 -5
app.py CHANGED
@@ -76,10 +76,10 @@ tokenizer.padding_side = "right"
 # print("=" * 80)


-# model = AutoModelForCausalLM.from_pretrained(
-#     model_name,
-#     quantization_config=bnb_config,
-# )
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+
+)
 stop_list = [" \n\nAnswer:", " \n", " \n\n"]
 stop_token_ids = [tokenizer(x, return_tensors='pt', add_special_tokens=False)['input_ids'] for x in stop_list]
 stop_token_ids = [torch.LongTensor(x).to("cuda") for x in stop_token_ids]
@@ -107,7 +107,7 @@ text_generation_pipeline = pipeline(
 )
 mistral_llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
 # # # load chroma from disk
-db3 = Chroma(persist_directory="/content/gdrive/MyDrive/diploma/all_emb/chroma/", embedding_function=instructor_embeddings)
+db3 = Chroma(persist_directory="/chroma/", embedding_function=instructor_embeddings)


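For context, a minimal sketch of how stop token ids like the stop_token_ids built in this file are typically consumed: a custom StoppingCriteria handed to the transformers generation pipeline. The StopOnTokens class and the stopping_criteria keyword below are assumptions based on the standard transformers API; this diff does not show app.py wiring them up.

import torch
from transformers import StoppingCriteria, StoppingCriteriaList

class StopOnTokens(StoppingCriteria):
    """Stop generation once the output ends with any stop sequence."""
    # Hypothetical helper; not part of this commit.

    def __init__(self, stop_token_ids):
        self.stop_token_ids = stop_token_ids

    def __call__(self, input_ids: torch.LongTensor,
                 scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in self.stop_token_ids:
            # Compare the tail of the generated ids against each stop sequence.
            if input_ids[0][-stop_ids.shape[-1]:].equal(stop_ids[0]):
                return True
        return False

stopping_criteria = StoppingCriteriaList([StopOnTokens(stop_token_ids)])
# Passing stopping_criteria=stopping_criteria to the pipeline (or to
# model.generate) would then cut generation at " \n\nAnswer:", " \n",
# or " \n\n".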