{
  "architectures": [
    "BERTMultiAttentionModel"
  ],
  "auto_map": {
    "AutoConfig": "config.BERTMultiAttentionConfig"
  },
  "dropout": 0.1,
  "hidden_size": 768,
  "model_type": "bert_multi_attention",
  "num_heads": 8,
  "rnn_bidirectional": true,
  "rnn_hidden_size": 128,
  "rnn_num_layers": 2,
  "torch_dtype": "float32",
  "transformer": "bert-base-uncased",
  "transformers_version": "4.37.2"
}