alexkueck committed on
Commit
e932e97
·
1 Parent(s): 538d4a5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -29,7 +29,7 @@ _ = load_dotenv(find_dotenv())
29
  # access token with permission to access the model and PRO subscription
30
  HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")
31
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
32
- login(token=os.environ["HF_ACCESS_READ"])
33
 
34
  OAI_API_KEY=os.getenv("OPENAI_API_KEY")
35
 
@@ -72,16 +72,16 @@ splittet = False
72
  # inference client
73
  ##############################################
74
  print ("Inf.Client")
75
- client = InferenceClient("https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf")
76
  #client = InferenceClient("https://ybdhvwle4ksrawzo.eu-west-1.aws.endpoints.huggingface.cloud")
77
- #client = InferenceClient(model="TheBloke/Yi-34B-Chat-GGUF")
78
 
79
  ##############################################
80
  # tokenizer for generating prompt
81
  ##############################################
82
  print ("Tokenizer")
83
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-70b-chat-hf")
84
- #tokenizer = AutoTokenizer.from_pretrained("TheBloke/Yi-34B-Chat-GGUF")
85
  #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
86
  #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
87
 
 
29
  # access token with permission to access the model and PRO subscription
30
  HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")
31
  os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
32
+ #login(token=os.environ["HF_ACCESS_READ"])
33
 
34
  OAI_API_KEY=os.getenv("OPENAI_API_KEY")
35
 
 
72
  # inference client
73
  ##############################################
74
  print ("Inf.Client")
75
+ #client = InferenceClient("https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf")
76
  #client = InferenceClient("https://ybdhvwle4ksrawzo.eu-west-1.aws.endpoints.huggingface.cloud")
77
+ #client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
78
 
79
  ##############################################
80
  # tokenizer for generating prompt
81
  ##############################################
82
  print ("Tokenizer")
83
+ #tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-70b-chat-hf")
84
+ tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
85
  #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
86
  #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
87