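"""Spider for scraping KAMI news articles from a locally saved listing page.

Follows every article link found in 3000.html and yields each article's
title, cleaned body text, highlight bullets joined as a summary, the
publication date, and the source URL.
"""
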
import os
import scrapy
from langdetect import detect

from ..utils.dash_remover import clean
from ..utils.noise_word_remover import remove
from ..utils.highlights_remover import remove_highlights


class KamiSpider(scrapy.Spider):
    name = 'kami'
    # Crawl a locally saved copy of the listing page.
    start_urls = [f'file://{os.getcwd()}/3000.html']

    def parse(self, response):
        # Follow every article link found on the listing page.
        articles = response.css('div.c-article-card-horizontal__container a::attr(href)').getall()

        for article in articles:
            yield response.follow(article, callback=self.parse_articles)

    def parse_articles(self, response):
        # Article text: strip noise words, then remove the bolded highlight
        # lines so they are not duplicated in the body.
        bold_text = remove(response.css("strong::text").getall())
        article_text = remove(response.css("div.post__content p *::text").getall())

        cleaned_article_text = remove_highlights(bold_text, article_text)
        article_text = ' '.join(cleaned_article_text).strip()

        # Summary: the bolded highlight bullets that start with "-",
        # with the leading dashes cleaned off.
        highlights = list(filter(lambda x: x.startswith("-"), response.css('strong::text').getall()))
        cleaned_highlights = list(map(clean, highlights))
        summary = '.'.join(cleaned_highlights).strip()

        # detect() raises LangDetectException on empty input, so skip pages
        # with no highlights; only non-English summaries are yielded.
        if summary and detect(summary) != "en":
            yield {
                'title': response.css("h1.c-main-headline::text").get(),
                'article_text': article_text,
                'summary': summary,
                'article_date': response.css("time::text").get(),
                'source': response.request.url
            }
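
# Usage sketch (assumptions: a standard Scrapy project with this spider
# registered, the ..utils helpers importable, and a saved listing page
# named 3000.html in the working directory):
#
#   scrapy crawl kami -o kami_articles.json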