XudongShen committed on
Commit 8bf1d9f · 1 Parent(s): 096cf06

duplicate text_config_dict as text_config, and likewise vision_config_dict as vision_config

Files changed (2)
  1. config.json +153 -0
  2. onnx/model.onnx +0 -3
config.json CHANGED
@@ -83,6 +83,82 @@
     "use_bfloat16": false,
     "vocab_size": 49409
   },
+  "text_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 0,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 49407,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 512,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 2048,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 77,
+    "min_length": 0,
+    "model_type": "clip_text_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 8,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 49408,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.27.1",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "vocab_size": 49409
+  },
   "torch_dtype": "float32",
   "transformers_version": null,
   "vision_config_dict": {
@@ -161,5 +237,82 @@
     "transformers_version": "4.27.1",
     "typical_p": 1.0,
     "use_bfloat16": false
+  },
+  "vision_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 224,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 32,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.27.1",
+    "typical_p": 1.0,
+    "use_bfloat16": false
   }
 }
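
The net effect: config.json now carries each sub-configuration twice, under the legacy "text_config_dict"/"vision_config_dict" keys and under the "text_config"/"vision_config" keys that recent transformers releases read. A minimal sanity check (illustrative, not part of the commit; assumes a local copy of the updated config.json):

import json

# Illustrative check: after this change, each "*_config" block should
# mirror its legacy "*_config_dict" twin field for field.
with open("config.json") as f:  # local path to the updated file (assumed)
    cfg = json.load(f)

for name in ("text_config", "vision_config"):
    duplicated = cfg[name]
    legacy = cfg[name + "_dict"]
    for key, value in legacy.items():
        # Every field of the legacy dict should be reproduced verbatim.
        assert duplicated.get(key) == value, f"{name}[{key!r}] diverges"
print("text_config and vision_config match their *_config_dict copies")

Depending on the transformers version, one of the two keys takes precedence when both are present, so keeping the copies identical avoids version-dependent behavior.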
onnx/model.onnx DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:93d98a549911ed16a76f489a76733cd2a49af618b478b8f1aac6ff4ea16f9e6f
-size 605763029
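
For context (not stated in the commit): the three deleted lines are a Git LFS pointer rather than the model itself. A pointer file records the spec version, the object's SHA-256 id, and its byte size (about 606 MB here), so this change drops the ONNX export from the repository head. A tiny illustrative parser for the pointer format:

def parse_lfs_pointer(text: str) -> dict:
    # Git LFS pointer files are "key value" lines,
    # e.g. "size 605763029" or "oid sha256:93d9...".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    fields["size"] = int(fields["size"])  # byte count of the real object
    return fields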