Modalities: Text
Formats: csv
Languages: Italian
Tags: legal
Libraries: Datasets, pandas
License: mit
RedHitMark committed · Commit 41bb7cb · 1 Parent(s): a311b5f

corpus script generation

Files changed (6)
  1. .gitignore +1 -0
  2. README.md +1 -0
  3. corpus.csv +3 -0
  4. corpus.tsv +3 -0
  5. document_to_corpus.py +43 -0
  6. requirements.txt +2 -0
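
The corpus itself ships as corpus.csv (with a corpus.tsv mirror), and the page lists pandas and Datasets among the supported libraries. A minimal loading sketch, assuming a local copy of corpus.csv has already been fetched (the LFS pointers below only reference the data):

# Sketch: load the published CSV with the Hugging Face Datasets library.
# Assumes corpus.csv is present locally (e.g. after `git lfs pull`); the path is illustrative.
from datasets import load_dataset

corpus = load_dataset("csv", data_files="corpus.csv")
print(corpus["train"][0])  # first chapter row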
.gitignore ADDED
@@ -0,0 +1 @@
+ .venv
README.md CHANGED
@@ -1,3 +1,4 @@
 ---
 license: mit
 ---
+ # Corpus ItaIst
corpus.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76dad0b3ce089f1ad6e72615ea785fc2b7e4f3a9ef1c61876e82d2abddee1723
+ size 14025525
corpus.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f2fceef17df443a863579507600071fe3274b429c9886656a9b2de2104ea691
+ size 14024853
document_to_corpus.py ADDED
@@ -0,0 +1,43 @@
import os

import pandas as pd

BASE_DIR = os.path.join(".")

if __name__ == "__main__":
    chapters = []

    # Walk the documents tree and collect every extracted_text.md file.
    for root, dirs, files in os.walk(os.path.join(BASE_DIR, "documents")):
        if "extracted_text.md" in files:
            full_path = os.path.join(root, "extracted_text.md")
            with open(full_path, "r", encoding="utf-8") as file:
                text = file.read()
            lines = text.split("\n")

            # Indices of the "# " chapter headings; each chapter runs until the next heading.
            chap_indices = [i for i in range(len(lines)) if lines[i].startswith("# ")]

            # Path layout assumed by the indices below:
            # documents/<region>/<topic>/<document_type>/<date>_<entity>_<id>/extracted_text.md
            # Split on os.sep so the script is not tied to Windows path separators.
            parts = os.path.normpath(full_path).split(os.sep)
            dir_parts = parts[-2].split("_")

            for i in range(len(chap_indices)):
                next_index = chap_indices[i + 1] if i < len(chap_indices) - 1 else len(lines)

                title = lines[chap_indices[i]][2:].strip()

                content = "\n".join(lines[chap_indices[i] + 1:next_index])
                content = "\n".join(line.rstrip() for line in content.split("\n"))
                content = content.rstrip()

                chapters.append({
                    "region": parts[-5],
                    "topic": parts[-4],
                    "document_type": parts[-3],
                    "entity": dir_parts[1],
                    "document_date": dir_parts[0],
                    "document_id": dir_parts[2],
                    "progress": i + 1,
                    "title": title,
                    "content": content,
                })

    # Quick sanity check on the first extracted chapter.
    print(chapters[0])

    df = pd.DataFrame(chapters).sort_values(by=["region", "topic", "document_type", "document_id", "progress"])
    df.to_csv(os.path.join(BASE_DIR, "corpus.csv"), index=False)
    df.to_csv(os.path.join(BASE_DIR, "corpus.tsv"), sep="\t", index=False)
    df.to_excel(os.path.join(BASE_DIR, "corpus.xlsx"), index=False)
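
As a quick check on the script's output, a sketch that reads corpus.csv back with pandas and verifies the column set the script writes (assumes the script has already been run in the same directory):

# Sketch: read the generated corpus back and confirm the expected columns.
import pandas as pd

df = pd.read_csv("corpus.csv")
expected = ["region", "topic", "document_type", "entity", "document_date",
            "document_id", "progress", "title", "content"]
print(df.columns.tolist() == expected)  # should print True
print(len(df), "chapters")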
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ pandas==2.2.3
+ openpyxl==3.1.5