from bs4 import BeautifulSoup
from datasets import load_dataset


def get_titles(file_path):
    # Extract article titles from a saved HTML file (downloaded from
    # https://de.wikipedia.org/wiki/Wikipedia:Exzellente_Artikel).
    with open(file_path, 'r', encoding='utf-8') as f:
        html_content = f.read()

    soup = BeautifulSoup(html_content, 'html.parser')

    # Find the <tbody> element (adjust this to match your HTML structure).
    tbody = soup.find('tbody')

    # Extract all <tr> elements within the <tbody>, skipping the first two
    # (header rows).
    trs = tbody.find_all('tr')[2:]

    # Collect the title attribute of every <a> tag in the remaining rows.
    titles = []
    for tr in trs:
        if tr is None or tr.find('a') is None:
            continue
        for a_tag in tr.find_all('a'):
            if 'title' in a_tag.attrs:
                titles.append(a_tag['title'])
    return titles


if __name__ == '__main__':
    titles_exzellent = get_titles('exzellent.txt')
    #titles_lesenswert = get_titles('lesenswert.txt')
    titles = titles_exzellent  #+ titles_lesenswert

    # Deduplicate the titles and write them to disk.
    titles = list(set(titles))
    with open('titles.txt', 'w', encoding='utf-8') as f:
        for title in titles:
            f.write(title + '\n')

    # Get wikipedia dataset
    dataset = load_dataset("graelo/wikipedia", "20230901.de", split="train")

    # Filter dataset: keep only articles whose title is on the curated list.
    # A set gives O(1) membership tests instead of scanning the list for
    # every example.
    title_set = set(titles)
    dataset = dataset.filter(lambda example: example['title'] in title_set, num_proc=64)

    # Report which curated titles were not found in the dump. This must run
    # before the map below, which removes the title column.
    used_titles = {example['title'] for example in dataset}
    unused_titles = [title for title in titles if title not in used_titles]
    print(f'Number of used titles: {len(used_titles)}')
    print(f'Number of unused titles: {len(unused_titles)}')
    print(unused_titles[:20])

    # Prepend each title to its article text as a Markdown heading and drop
    # the title column. Dataset.map is not in-place, so the result must be
    # reassigned.
    dataset = dataset.map(
        lambda x: {'text': f"# {x['title']}\n\n{x['text']}"},
        remove_columns=['title'],
        num_proc=64,
    )

    # Save dataset to the Hugging Face Hub (requires prior authentication,
    # e.g. via `huggingface-cli login`).
    dataset.push_to_hub("LeoLM/wiki_de_exzellent", private=True)
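
# Optional: instead of saving the list page by hand, it can be fetched
# programmatically. This is a minimal sketch, assuming the `requests` package
# is installed; the script above expects the page to already exist on disk as
# exzellent.txt, so this step would run once beforehand.
#
#   import requests
#   url = "https://de.wikipedia.org/wiki/Wikipedia:Exzellente_Artikel"
#   with open('exzellent.txt', 'w', encoding='utf-8') as f:
#       f.write(requests.get(url, timeout=30).text)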