from transformers import LlamaConfig


class LlamaActionConfig(LlamaConfig):
    """LlamaConfig extended with spatio-temporal and action-token settings."""

    model_type = "llama_action"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Spatial embedding slots per frame (default 582).
        self.num_spatio_embeddings = kwargs.get("num_spatio_embeddings", 582)
        # Temporal embedding slots (default 25).
        self.num_temporal_embeddings = kwargs.get("num_temporal_embeddings", 25)
        # Note: this attribute reads its value from the "num_action_tokens" kwarg.
        self.num_action_embeddings = kwargs.get("num_action_tokens", 5)
        self.num_image_patches = kwargs.get("num_image_patches", 576)
        self.action_dim = kwargs.get("action_dim", 3)
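For reference, a minimal usage sketch (not part of the file above): it assumes the class is importable as LlamaActionConfig and registers it with transformers' AutoConfig so the custom model_type can be resolved later; the overridden values are illustrative only.

from transformers import AutoConfig

# Register the custom config under its model_type string.
AutoConfig.register("llama_action", LlamaActionConfig)

# Override a couple of defaults; the action count is passed as "num_action_tokens".
config = LlamaActionConfig(num_action_tokens=7, action_dim=2)
print(config.num_action_embeddings)  # 7
print(config.num_spatio_embeddings)  # 582 (default)
print(config.model_type)             # "llama_action"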