DiDustin committed on
Commit
4bb79cf
·
verified ·
1 Parent(s): 7eaf003

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -63,7 +63,7 @@ LANGUAGES = {
63
  loaded_models = {}
64
  loaded_tokenizers = {}
65
 
66
- @spaces.GPU(duration=180)
67
  def load_model_and_tokenizer(model_key):
68
  if model_key not in loaded_models:
69
  model_info = MODELS[model_key]
@@ -84,7 +84,7 @@ def load_model_and_tokenizer(model_key):
84
  tokenizer.pad_token = tokenizer.eos_token
85
  loaded_tokenizers[model_key] = tokenizer
86
 
87
- @spaces.GPU(duration=180)
88
  def generate_text(model_choice, prompt, max_length, temperature, top_p, do_sample):
89
  load_model_and_tokenizer(model_choice)
90
 
@@ -136,7 +136,7 @@ def update_language(selected_language):
136
  )
137
 
138
 
139
- @spaces.GPU(duration=180)
140
  def wrapped_generate_text(model_choice, prompt, max_length, temperature, top_p, do_sample):
141
  return generate_text(model_choice, prompt, max_length, temperature, top_p, do_sample)
142
 
 
63
  loaded_models = {}
64
  loaded_tokenizers = {}
65
 
66
+ @spaces.GPU(duration=60)
67
  def load_model_and_tokenizer(model_key):
68
  if model_key not in loaded_models:
69
  model_info = MODELS[model_key]
 
84
  tokenizer.pad_token = tokenizer.eos_token
85
  loaded_tokenizers[model_key] = tokenizer
86
 
87
+ @spaces.GPU(duration=140)
88
  def generate_text(model_choice, prompt, max_length, temperature, top_p, do_sample):
89
  load_model_and_tokenizer(model_choice)
90
 
 
136
  )
137
 
138
 
139
+ @spaces.GPU(duration=140)
140
  def wrapped_generate_text(model_choice, prompt, max_length, temperature, top_p, do_sample):
141
  return generate_text(model_choice, prompt, max_length, temperature, top_p, do_sample)
142