Training in progress, step 15000, checkpoint
- checkpoint-14100/config.json +31 -0
- checkpoint-14100/model.safetensors +3 -0
- checkpoint-14100/training_args.bin +3 -0
- checkpoint-14200/config.json +31 -0
- checkpoint-14200/model.safetensors +3 -0
- checkpoint-14200/training_args.bin +3 -0
- checkpoint-14300/config.json +31 -0
- checkpoint-14300/model.safetensors +3 -0
- checkpoint-14300/training_args.bin +3 -0
- checkpoint-14400/config.json +31 -0
- checkpoint-14400/model.safetensors +3 -0
- checkpoint-14400/training_args.bin +3 -0
- checkpoint-14500/config.json +31 -0
- checkpoint-14500/model.safetensors +3 -0
- checkpoint-14500/training_args.bin +3 -0
- checkpoint-14600/config.json +31 -0
- checkpoint-14600/model.safetensors +3 -0
- checkpoint-14600/training_args.bin +3 -0
- checkpoint-14700/config.json +31 -0
- checkpoint-14700/model.safetensors +3 -0
- checkpoint-14700/training_args.bin +3 -0
- checkpoint-14800/config.json +31 -0
- checkpoint-14800/model.safetensors +3 -0
- checkpoint-14800/training_args.bin +3 -0
- checkpoint-14900/config.json +31 -0
- checkpoint-14900/model.safetensors +3 -0
- checkpoint-14900/training_args.bin +3 -0
- checkpoint-15000/config.json +31 -0
- checkpoint-15000/model.safetensors +3 -0
- checkpoint-15000/training_args.bin +3 -0
checkpoint-14100/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
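Every checkpoint in this commit ships the same config.json: a GPT-NeoX model at GPT-2-small scale (12 layers, 12 heads, hidden size 768, 1024-token context, rotary embeddings on 25% of the head dimensions, bfloat16 weights). A minimal sketch of parsing it with transformers, assuming the repo has been cloned locally; the `checkpoint-14100` path is illustrative:

```python
# Minimal sketch (not part of the commit): parse a checkpoint's config.json
# with transformers. Assumes "checkpoint-14100" exists as a local directory.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig.from_pretrained("checkpoint-14100")
assert config.model_type == "gpt_neox"
# GPT-2-small-sized GPT-NeoX: 12 layers x 12 heads, hidden size 768,
# rotary embeddings applied to 25% of each head's dimensions.
print(config.num_hidden_layers, config.num_attention_heads, config.rotary_pct)
```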
checkpoint-14100/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5746eab2312a09e4f950ba53e434029d7c0601f5845e4ce41fce5fd99b49f419
+size 324662984
checkpoint-14100/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
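The model.safetensors and training_args.bin entries are Git LFS pointers, not the files themselves: each three-line stub records only the spec version, a SHA-256 oid, and the byte size (about 325 MB of bfloat16 weights per checkpoint; 6.5 KB of serialized TrainingArguments, identical across all ten checkpoints). A plain clone without LFS leaves these stubs in place. One way to resolve a checkpoint's weights, sketched below with huggingface_hub; the repo id is an assumption borrowed from `_name_or_path` in config.json, so substitute the repo this commit actually belongs to:

```python
# Minimal sketch (not part of the commit): fetch the real file behind an LFS
# pointer and load it. The repo id is hypothetical (taken from "_name_or_path").
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

weights_path = hf_hub_download(
    repo_id="georgeyw/gpt-2-small-init-seed-5",  # hypothetical repo id
    filename="checkpoint-14100/model.safetensors",
)
state_dict = load_file(weights_path)  # ~325 MB of bfloat16 tensors
print(len(state_dict), "tensors")
```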
checkpoint-14200/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14200/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fe40e5b91cdbcc9b2de68ebf9a6b2299847ecb845ff77ccf42b73b74627a936
+size 324662984
checkpoint-14200/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-14300/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14300/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bde6028a5850f35a55edead7ca4360a7ca774eee092bc79bc2043bdba6e80ee
+size 324662984
checkpoint-14300/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-14400/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14400/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ea2f191e62530d4fb60e382b8714cec4f71b4e851329f69262afc6e9a5d707d
+size 324662984
checkpoint-14400/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-14500/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14500/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27cf9555710565b1c05c3f2ace81be1c2fd0393a545ece1a949aa6279ea28012
+size 324662984
checkpoint-14500/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-14600/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14600/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a3bbae9d0905b61bd83b75cdd1e7b0b8d19969ac59445b2f3920f0b687508e8
+size 324662984
checkpoint-14600/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-14700/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14700/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3816c7692f9ac6e63f79e4997fbb2d28cb5506d769575a4b06c958516206ee49
+size 324662984
checkpoint-14700/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-14800/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14800/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6509a66a29d47396cb45dbb4acc93d5e0b8ba1e9688b0c1adf8e7223746aa5c
+size 324662984
checkpoint-14800/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-14900/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-14900/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eebce53f0bf285b4ef17ca9d76021e2ea04434584e514d801d6d9717c071efc7
+size 324662984
checkpoint-14900/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520
checkpoint-15000/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "georgeyw/gpt-2-small-init-seed-5",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
checkpoint-15000/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcf99c8f574f2ea9de76097a2e2ad8e8fa94d560765f95556c5e849c3d7c577c
+size 324662984
checkpoint-15000/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3f522b6b895157d4ae37816ea2b39e4b24555bc3782f9f18492c6709abd779
+size 6520