as-cle-bert committed
Commit c0cd7ac · verified · 1 Parent(s): be9ee29

Update app.py

Files changed (1): app.py (+2 −2)
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers.models.esm.openfold_utils.protein import to_pdb, Protein as OF
 from transformers.models.esm.openfold_utils.feats import atom14_to_atom37
 from proteins_viz import *
 import gradio as gr
-
+import spaces
 
 def convert_outputs_to_pdb(outputs):
     final_atom_positions = atom14_to_atom37(outputs["positions"][-1], outputs)
@@ -40,7 +40,7 @@ torch.backends.cuda.matmul.allow_tf32 = True
 
 model.trunk.set_chunk_size(64)
 
-
+@spaces.GPU(duration=120)
 def fold_protein(test_protein):
     tokenized_input = tokenizer([test_protein], return_tensors="pt", add_special_tokens=False)['input_ids']
     tokenized_input = tokenized_input.cuda()
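
For context, this change follows the usual ZeroGPU pattern: import spaces brings in the Hugging Face spaces package, and decorating the GPU-bound function with @spaces.GPU(duration=120) asks the Space to attach a GPU only while fold_protein executes, for at most 120 seconds per call. Below is a minimal sketch of the same pattern, assuming a ZeroGPU Space with gradio, spaces, and torch installed; the function and interface names are illustrative, not taken from app.py.

import gradio as gr
import spaces
import torch

# The GPU is attached only while this decorated function runs,
# and is released after at most 120 seconds per call.
@spaces.GPU(duration=120)
def run_on_gpu(text: str) -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"Handled on {device}: {text}"

demo = gr.Interface(fn=run_on_gpu, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()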