calibre/recipes/danas.recipe

# -*- mode: python -*-
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2008-2018, Darko Miletic <darko.miletic at gmail.com>'
'''
danas.rs
'''

from calibre.web.feeds.news import BasicNewsRecipe
from datetime import date, timedelta


class Danas(BasicNewsRecipe):
    title = 'Danas'
    __author__ = 'Darko Miletic'
    description = 'Dnevne novine sa vestima iz sveta, politike, ekonomije, kulture, sporta, Beograda, Novog Sada i cele Srbije.'
    publisher = 'DAN GRAF d.o.o.'
    category = 'danas Dnevne novine politika drustvo ekonomija svet sport beograd vojvodina periskop terazije Beograd Srbija Novi Sad Nis Kragujevac Vojvodina politics Serbia'  # noqa
    oldest_article = 2
    max_articles_per_feed = 100
    no_stylesheets = False
    use_embedded_content = False
    encoding = 'utf-8'
    masthead_url = 'http://www.danas.rs/images/basic/logo.png'
    language = 'sr'
    remove_javascript = True
    publication_type = 'newspaper'
    remove_empty_feeds = True
    auto_cleanup = True
    auto_cleanup_keep = '//h1[@class="title"] | //div[@class="subTitle"] | //div[@class="author"] | //div[@class="published"]'
    resolve_internal_links = True
    INDEX = "http://www.danas.rs"
    extra_css = """
        .author{font-size: small}
        .published {font-size: small}
        img{margin-bottom: 0.8em}
    """
    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language
    }
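
    # Section index pages on danas.rs. parse_index() below scrapes each of these
    # HTML pages directly rather than relying on RSS feeds.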
    feeds = [
        (u'Politika', u'http://www.danas.rs/politika.56.html'),
        (u'Drustvo', u'http://www.danas.rs/drustvo.55.html'),
        (u'Dijalog', u'http://www.danas.rs/dijalog.46.html'),
        (u'Ekonomija', u'http://www.danas.rs/ekonomija.4.html'),
        (u'Svet', u'http://www.danas.rs/svet.1160.html'),
        (u'Sport', u'http://www.danas.rs/sport.18.html'),
        (u'Kultura', u'http://www.danas.rs/kultura.11.html'),
        (u'Scena', u'http://www.danas.rs/scena.561.html'),
        (u'Zivot', u'http://www.danas.rs/zivot.1140.html'),
        (u'Auto', u'http://www.danas.rs/auto.1171.html'),
        (u'IT', u'http://www.danas.rs/it.29.html'),
        (u'Ljudi', u'http://www.danas.rs/ljudi.1141.html'),
        (u'Beograd', u'http://www.danas.rs/beograd.39.html'),
    ]

    def get_cover_url(self):
        # The front-page scan is published under the previous day's date path.
        td = date.today() - timedelta(1)
        cpart = str(td.year) + '/' + str(td.month) + '/' + str(td.day)
        return 'http://www.danas.rs/upload/images/' + cpart + '/Naslovna_velika.jpg'
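    # A more defensive variant (a sketch, not part of the original recipe) could
    # check that the cover image actually exists before returning its URL, and
    # fall back to no cover when the request fails:
    #
    #     def get_cover_url(self):
    #         td = date.today() - timedelta(1)
    #         cpart = '%d/%d/%d' % (td.year, td.month, td.day)
    #         curl = 'http://www.danas.rs/upload/images/' + cpart + '/Naslovna_velika.jpg'
    #         try:
    #             self.get_browser().open(curl)
    #         except Exception:
    #             return None
    #         return curl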

    def parse_index(self):
        totalfeeds = []
        lfeeds = self.get_feeds()
        for feedobj in lfeeds:
            feedtitle, feedurl = feedobj
            self.report_progress(
                0,
                _('Fetching feed') + ' %s...' %
                (feedtitle if feedtitle else feedurl)
            )
            articles = []
            soup = self.index_to_soup(feedurl)
            # Every article teaser on a section page sits in a div whose class
            # list contains 'newsBox'.
            for item in soup.findAll(
                'div', attrs={
                    'class': lambda x: x and 'newsBox' in x.split()
                }
            ):
                atag = item.find('h2').a
                ptag = item.find('p', attrs={'class': 'lead'})
                datetag = item.find('div', attrs={'class': 'date'})
                url = self.INDEX + atag['href']
                title = self.tag_to_string(atag)
                description = ''
                if ptag:
                    description = self.tag_to_string(ptag)
                # Named so it does not shadow the datetime.date import.
                artdate = self.tag_to_string(datetag)
                articles.append({
                    'title': title,
                    'date': artdate,
                    'url': url,
                    'description': description
                })
            totalfeeds.append((feedtitle, articles))
        return totalfeeds
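
# Usage note (not part of the original recipe): a recipe file can be previewed
# from the command line with calibre's ebook-convert tool, for example
#   ebook-convert danas.recipe danas.epub --test
# where --test limits the download to a couple of articles per feed for a quick
# check that parse_index() and get_cover_url() still work against the site.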