# Page-extraction artifacts from the original gist rendering, kept as comments
# so the file remains valid Python:
# File size: 454 Bytes | commit 267e820 | lines 1-12
#!/usr/bin/env python3
"""Demonstrate `variant` handling in `CLIPTextModel.from_pretrained`.

Loads the local ``./text_encoder`` checkpoint three times with variants
that are expected to resolve ("no_ema", "fp16", and no variant), then
attempts a nonexistent variant ("other"), which is expected to raise.
The final call is deliberately left unguarded so the failure surfaces
as a traceback.
"""
from transformers import CLIPTextModel

# The three supported cases: two named weight variants plus the default
# (variant=None is equivalent to omitting the argument).
for variant in ("no_ema", "fp16", None):
    print("This should work!:")
    model = CLIPTextModel.from_pretrained("./text_encoder", variant=variant)

# A variant with no matching weight files on disk — expected to raise.
print("This should NOT work!:")
model = CLIPTextModel.from_pretrained("./text_encoder", variant="other")
# (end of extracted gist content)