alexkueck committed on
Commit d7b7c3a · 1 Parent(s): 216be95

Update app.py

Files changed (1): app.py +18 -9
app.py CHANGED
@@ -27,8 +27,8 @@ _ = load_dotenv(find_dotenv())
 
 
 # access token with permission to access the model and PRO subscription
-HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")
-#login(token=os.environ["HF_ACCESS_READ"])
+#HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")
+login(token=os.environ["HF_ACCESS_READ"])
 
 OAI_API_KEY=os.getenv("OPENAI_API_KEY")
 
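This hunk flips from reading the token into a variable to calling login(), which registers the token process-wide with huggingface_hub so later InferenceClient and AutoTokenizer calls authenticate implicitly (needed for gated and PRO-only models). A minimal sketch of the two patterns, assuming HF_ACCESS_READ is set as a Space secret as in the diff:

import os
from dotenv import load_dotenv, find_dotenv
from huggingface_hub import login

_ = load_dotenv(find_dotenv())  # loads HF_ACCESS_READ from a local .env if present

# Variant now active in app.py: register the token globally, so subsequent
# hub calls need no explicit token argument.
login(token=os.environ["HF_ACCESS_READ"])

# Variant now commented out: keep the token in a variable and pass it
# per call, e.g. InferenceClient(token=HUGGINGFACEHUB_API_TOKEN).
HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")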
@@ -72,14 +72,9 @@ splittet = False
 ##############################################
 print ("Inf.Client")
 #client = InferenceClient("https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf")
-#client = InferenceClient("https://ybdhvwle4ksrawzo.eu-west-1.aws.endpoints.huggingface.cloud")
+client = InferenceClient("https://ybdhvwle4ksrawzo.eu-west-1.aws.endpoints.huggingface.cloud")
 #client = InferenceClient(model="TheBloke/Yi-34B-Chat-GGUF")
 
-#list of models available
-client = InferenceClient()
-print("List of models ......................:")
-print(client.list_deployed_models("text-generation-inference"))
-
 ##############################################
 # tokenizer for generating prompt
 ##############################################
@@ -87,7 +82,21 @@ print ("Tokenizer")
87
  #tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-70b-chat-hf")
88
  #tokenizer = AutoTokenizer.from_pretrained("TheBloke/Yi-34B-Chat-GGUF")
89
  #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
90
- tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
 
93
  #################################################
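The tokenizer is loaded "for generating prompt", and switching it to Mixtral means prompts can be built with that model's chat template, which renders the [INST] ... [/INST] format the instruct model behind the endpoint expects. A minimal sketch; the message history is an illustrative assumption, not data from app.py:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Illustrative chat history; roles must alternate user/assistant for this template.
messages = [
    {"role": "user", "content": "Hello, who are you?"},
    {"role": "assistant", "content": "I am a chat assistant."},
    {"role": "user", "content": "What did this commit change?"},
]

# Render the [INST] ... [/INST] prompt string without tokenizing it.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)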
 