kentof committed on
Commit 9a4330f · verified · 1 Parent(s): 686f409

Upload folder using huggingface_hub

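The folder was pushed with the huggingface_hub client. A minimal sketch of how such an upload is typically issued (the local path and dataset repo id below are assumptions for illustration, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login`
api.upload_folder(
    folder_path="./wiki_pack",             # hypothetical local folder holding the .jsonl shards
    repo_id="your-user/wiki-packed-text",  # hypothetical dataset repo id
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)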
.gitattributes CHANGED
@@ -53,3 +53,32 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ 0_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 10_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 11_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 12_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 13_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 14_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 15_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 16_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 17_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 18_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 19_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 1_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 20_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 21_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 22_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 23_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 24_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 25_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 26_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 27_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 28_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 2_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 3_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 4_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 5_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 6_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 7_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 8_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
+ 9_wiki_text.jsonl filter=lfs diff=lfs merge=lfs -text
0_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10541a3c2a3dbbf977660f94dbbd56ec68e19c4167072f05d4c2bb1aa198a0ad
+ size 70744218
10_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff0592fea6e488ef184db0f6a74e3d823ff24e1925dd3ddd7112ecb55dfec7ba
+ size 70640318
11_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb04a2460200aa8cfa7415a12290f53e16e66ecec1892e43835bab020d3c319a
+ size 70412276
12_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f5e3fbfe35c42b1bdfcf36e6482164c9aa2bb3af2e41f7c2904a9b8b0ee6204
+ size 70334550
13_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd11f4fa79f339a740d0914d8ba6d1d4557c8f784a3cd6ff747955680662c19a
+ size 70220773
14_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6f37dae0c2597cb8fc31546e85f6c3892842ab946218671e56719d4d9845966
+ size 70273525
15_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bb8720b5fd0e7ab4622654350954761f18cf7ab7a2a2d2239001564dabf1d6f
+ size 70705167
16_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1de612d1881e3160827df37725eb4fb235390677a118e7efe1c39134705e6da2
+ size 70698679
17_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b08991cf009d14c9a61ff67ccd8caba31786143a0984ca85e832ec245d66a2b4
+ size 70609657
18_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:367955db6618a61691711198812895dac7f1c2a2d7c027ee868ea93f8a5da861
+ size 70457095
19_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40116b24d7ae539935c77af4d0b48ac5cd34ae5a69356cd403e4bba41d9a4255
+ size 70493539
1_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba9f0524f7bedd672d8f341a4cca26f49fb89c0bc84a5a0703f7d42b3746a0be
+ size 70569000
20_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93cb20dd4004864780108a349cbd214f0d328d666a278e67422ec027676d7418
+ size 70161704
21_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ed51da09e8bb1ae93104f6338530cd1821ca81ebc4b87bf09674983518f2813
+ size 70329993
22_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3857341cb263b34d48d64cd3b7006c69e6f71d2d6c1ffd4a7530cc02c8bc1e2e
+ size 70103706
23_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:530b772b22ffa94292eca3ca40c19bfe6244cf768f4dfd0060691e2a99b01a7f
+ size 70639385
24_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9946cc853caa4152bd38483ae3980c00bd512b3220d78a146e57496bda9b420b
+ size 70661561
25_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ff58cf77c7f19c11a431e9dd3454210b7f04f78127709cccf4aa522cddb8b3e
+ size 70737645
26_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1ba95fff8ac3a1f468560f9b62e69e56e7b2fd39e45098f408dce8c7f9eb7f2
+ size 70561061
27_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c53524d8a6815109093703704e3402aaebb93d8734f3f708c9a0aad425fa2f4b
+ size 70513091
28_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ed3ea80f439a3beef50e36be23981cee4a45a33ae69ddb491975f9e2679e248
+ size 49619728
2_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:092180e0cd4493d7879acdaf3daafa9a6bd15b5abb78f29fb8cb11227aaf79db
+ size 70772381
3_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8fc191f857344c4d6d2cde92b622ba484e605b5605a2844b8b66f8ea0bd5638
+ size 70144457
4_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afe7ef04691fca3b46f9e48f2dc86f4b80ef623ce1f9b96a3a799b64015b4c6e
+ size 70230001
5_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec7f00efae39b64a5d8f770e8080cb3b8d3037fefda90dcee81e2df9928ab84b
+ size 70258040
6_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bafa2bc191134323864c0058303ca75ff410532076a5c582a2fcb30ec248668d
+ size 70347359
7_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68b00652dc17b525c3d93c32a8ad91e6623d1a84d6ae523b35c49f636c6191bf
+ size 70423991
8_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f63d3d9b14181fa8f265dde37c3bfae6e47b71358f4ea52cffe0952f399bd386
+ size 70742808
9_wiki_text.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90a86fcd8903dfdaa0e414484b057afbc550f6cbbb7ed306bd23a683711fd83f
+ size 70582194
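Each shard above is stored as a Git LFS pointer (version, oid, size); fetching it through huggingface_hub resolves the pointer to the actual JSONL content. A minimal sketch of downloading and inspecting one shard, assuming a hypothetical dataset repo id and the "text" / "is_rejected" / "reason" keys written by the packing script below:

import json
from huggingface_hub import hf_hub_download

# Hypothetical repo id; substitute the actual dataset repository.
path = hf_hub_download(repo_id="your-user/wiki-packed-text",
                       filename="0_wiki_text.jsonl",
                       repo_type="dataset")

with open(path, "r", encoding="utf-8") as f:
    record = json.loads(next(f))
print(record.keys())  # expected keys: text, is_rejected, reason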
make_pack_jsonl_for_wiki.py ADDED
@@ -0,0 +1,199 @@
+ import json
+ import os
+ from tqdm import tqdm
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import re
+ import concurrent.futures
+ from pathlib import Path
+
+ # Pack Wikipedia documents into roughly 2010-token chunks for pretraining.
+ # Path of the concatenated input file
+ file_path = 'wiki_concat_jsonl.jsonl'
+
+ model_id = "geniacllm/dMoE_8B_math_iter8999"
+ tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False, torch_dtype="auto", device_map="auto")
+ print(" load tokenizer ")
+
+ # Lists holding .jsonl file paths and the loaded texts
+ jsonl_files = []
+ jsonl_datas = []
+
+ with open(file_path, 'r', encoding='utf-8') as file:
+     for line in file:
+         # Convert each JSON line into a Python dict
+         data = json.loads(line)
+         jsonl_datas.append(data["text"])
+
+ # print("data", gsm8l_datas )
+ print("data", len(jsonl_datas))
+ print("data", jsonl_datas[-1])
+
+ # model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
+ # print(model)
+ # inputs = tokenizer("地球温暖化の主な原因となる物質は", return_tensors="pt").to(model.device)
+ # inputs
+
+ all_data = []
+ word_num = 0
+
+ short_full_document_list = []
+ complete_full_document_list = []
+ count = 0
+
+ total_items = len(jsonl_datas)
+ # big_list = list(range(total_items))
+
+ # Number of sublists (chunks) processed in parallel
+ num_chunks = 25
+ # Length of each sublist
+ items_per_sublist = total_items // num_chunks
+ # Build the sublists
+ chunks = [jsonl_datas[i * items_per_sublist:(i + 1) * items_per_sublist] for i in range(num_chunks)]
+
+ # Append any remaining items to the last sublist
+ if total_items % num_chunks != 0:
+     remaining_items = jsonl_datas[num_chunks * items_per_sublist:]
+     chunks[-1].extend(remaining_items)
+
+ # Worker function: tokenize each document, bucket it by token length,
+ # split over-long documents on '。', then greedily merge short documents
+ # into ~2010-token packs joined with "</s>".
+ def process_item(sublists):
+     # Use worker-local lists: appends made inside a worker process are not
+     # visible to the parent, so collect everything here and return it.
+     short_full_document_list = []
+     complete_full_document_list = []
+     for index, one_data in tqdm(enumerate(sublists)):
+         one_data_token = tokenizer(one_data, return_tensors="pt", add_special_tokens=True)
+         one_data_token_len = one_data_token.input_ids[0].size(0)
+         if one_data_token_len < 50:
+             pass  # drop very short documents
+         elif one_data_token_len < 1600:
+             short_full_document_list.append({'length': one_data_token_len, 'text': one_data})
+         elif one_data_token_len < 2010:
+             complete_full_document_list.append({'length': one_data_token_len, 'text': one_data})
+         else:
+             # Too long: split on sentence boundaries and regroup
+             one_sentences = one_data.split('。')
+             collect_one_sentences = []
+             split_one_sentence_len = 0
+             for sent_index, sentence in enumerate(one_sentences):
+                 one_sentence_token = tokenizer(sentence, return_tensors="pt", add_special_tokens=True)
+                 one_sentence_token_len = one_sentence_token.input_ids[0].size(0)
+                 if split_one_sentence_len + one_sentence_token_len < 2010:
+                     split_one_sentence_len = split_one_sentence_len + one_sentence_token_len
+                     collect_one_sentences.append(sentence)
+                 elif one_sentence_token_len > 2010:
+                     print(" Warning : Over one-sentence token :", one_sentence_token_len)
+                     print(sentence)
+                     split_document = "\n".join(collect_one_sentences)
+                     short_full_document_list.append({'length': split_one_sentence_len, 'text': split_document})
+                     split_one_sentence_len = 0
+                     collect_one_sentences.clear()
+                 else:
+                     split_document = "\n".join(collect_one_sentences)
+                     complete_full_document_list.append({'length': split_one_sentence_len, 'text': split_document})
+                     split_one_sentence_len = one_sentence_token_len
+                     collect_one_sentences.clear()
+                     collect_one_sentences.append(sentence)
+             split_document = "\n".join(collect_one_sentences)
+             if split_one_sentence_len < 1500:
+                 short_full_document_list.append({'length': split_one_sentence_len, 'text': split_document})
+             elif split_one_sentence_len < 2015:
+                 complete_full_document_list.append({'length': split_one_sentence_len, 'text': split_document})
+             else:
+                 print(" Warning : Over split-one-sentence token :", split_one_sentence_len)
+                 print(split_document)
+         # if index > 96605:
+         #     break
+
+     complete_full_document_sub_list = []
+
+     # Greedily merge short documents until the short list is exhausted
+     while True:
+         if len(short_full_document_list) == 0:
+             break
+
+         if len(short_full_document_list) % 500 == 0:
+             print(" Remaining : ", len(short_full_document_list))
+         target = short_full_document_list[0]
+         left_len = 2010 - target['length']
+         if left_len < 0:
+             print(" Error : Over token target :", target['length'])
+         del short_full_document_list[0]  # remove element 0
+         if len(short_full_document_list) == 0:
+             complete_full_document_sub_list.append(target)
+             break
+         else:
+             # Step 1: smallest remaining-budget difference among documents that still fit (None if nothing fits)
+             closest_length_diff = min((left_len - rec['length'] for rec in short_full_document_list if rec['length'] <= left_len), default=None)
+
+             if closest_length_diff is None:
+                 complete_full_document_sub_list.append(target)
+                 # print( "complete_full_document_list.append" )
+             else:
+                 # Find the index of the best-fitting partner document
+                 index = next((i for i, rec in enumerate(short_full_document_list) if rec['length'] == left_len - closest_length_diff), None)
+                 if index is not None:
+                     merge_document = short_full_document_list[index]
+                     del short_full_document_list[index]  # remove the partner from the short list
+                     merge_texts = target['text'] + "</s>" + merge_document['text']
+                     if target['length'] + merge_document['length'] < 1500:
+                         # Still short after merging: requeue for further packing
+                         short_full_document_list.append({'length': target['length'] + merge_document['length'] + 1, 'text': merge_texts})
+                         # print( "merge_texts" , merge_texts )
+                         # print( "short_full_document_list" , short_full_document_list[-1] )
+                     elif target['length'] + merge_document['length'] > 2022:
+                         print(" Error : Over token target :", target['length'], " merge_document : ", merge_document['length'])
+                     else:
+                         complete_full_document_sub_list.append({'length': target['length'] + merge_document['length'] + 1, 'text': merge_texts})
+                         # print( "merge_texts" , merge_texts )
+                 else:
+                     print(f"No document with length {left_len - closest_length_diff} was found.")
+
+     # Return both the documents that were already long enough and the packed ones
+     return complete_full_document_list + complete_full_document_sub_list
+
+ # print( "L110 sublists " , len( chunks) )
+ # Process each chunk in parallel with a process pool
+ with concurrent.futures.ProcessPoolExecutor() as executor:
+     results = list(executor.map(process_item, chunks))
+ # print( "L114 results " , len( results) )
+ # for result in results:
+ #     print( "L116 result " , len( result) )
+
+ # Flatten the per-chunk results with sum
+ complete_full_document_list.extend(sum(results, []))
+
+ print("L121 complete_full_document_list ", len(complete_full_document_list))
+
+ length_list = []
+ collect_length_list = []
+ file_count = 0
+
+ complete_full_document_list_len = len(complete_full_document_list)
+ # Maximum number of documents per output file
+ max_docs_per_file = 9999
+
+ # Number of output files needed
+ jsonl_num = complete_full_document_list_len // max_docs_per_file + (1 if complete_full_document_list_len % max_docs_per_file != 0 else 0)
+
+ # Split the data across files and save
+ for i in range(jsonl_num):
+     start_index = i * max_docs_per_file
+     end_index = start_index + max_docs_per_file
+     # Build a sequentially numbered file name ("<number>_wiki_text.jsonl")
+     filename = f"{i}_wiki_text.jsonl"
+
+     # Open the file and write the data
+     with open(filename, 'w', encoding='utf-8') as file:
+         for document in complete_full_document_list[start_index:end_index]:
+             # Build the JSON object for each text
+             json_obj = {
+                 "text": document['text'],
+                 "is_rejected": False,
+                 "reason": {}
+             }
+             # Serialize to a JSON string
+             json_line = json.dumps(json_obj, ensure_ascii=False)
+             # Write it to the file
+             file.write(json_line + '\n')
+             length_list.append(document['length'])
+             # Re-tokenize as a sanity check on the final pack length
+             document_tokens = tokenizer(document['text'], return_tensors="pt", add_special_tokens=True)
+             document_tokens_len = document_tokens.input_ids[0].size(0)
+             collect_length_list.append(document_tokens_len)
+             if document_tokens_len > 2021:
+                 print("L209 Error : Over token ", document['length'], " collect : ", document_tokens_len)
+ print("len(complete_full_document_list) ", len(complete_full_document_list))
+ # print( "length_list " , length_list )
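The packing stage of the script above is a greedy best-fit merge: take the first short document, pick the partner whose token count best fills the remaining ~2010-token budget, join the two with a "</s>" separator, then requeue the result if it is still short or emit it as a finished pack. A self-contained toy sketch of that idea, with plain integers standing in for token counts (the budget mirrors the script; the names and values are illustrative only):

BUDGET = 2010

def pack(short_docs):
    packed = []
    while short_docs:
        target = short_docs.pop(0)
        left = BUDGET - target["length"]
        # Best-fit partner: the document that leaves the least unused budget.
        candidates = [d for d in short_docs if d["length"] <= left]
        if not candidates:
            packed.append(target)
            continue
        partner = max(candidates, key=lambda d: d["length"])
        short_docs.remove(partner)
        merged = {
            "length": target["length"] + partner["length"] + 1,  # +1 for the </s> separator
            "text": target["text"] + "</s>" + partner["text"],
        }
        # Requeue if still short, otherwise emit as a finished pack.
        (short_docs if merged["length"] < 1500 else packed).append(merged)
    return packed

print(pack([{"length": 800, "text": "a"}, {"length": 1200, "text": "b"}, {"length": 300, "text": "c"}]))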