Update Berlin Policy Journal

This commit is contained in:
aimylios 2016-07-10 09:21:13 +02:00
parent ea8137ac42
commit 0adf19efaa

View File

@@ -1,9 +1,8 @@
 #!/usr/bin/env python2
 # vim:fileencoding=utf-8
-from __future__ import unicode_literals
-__license__ = 'GPL v3'
-__copyright__ = '2016, Aimylios'
+# License: GPLv3 Copyright: 2016, Aimylios <aimylios at gmx.de>
+from __future__ import unicode_literals, division, absolute_import, print_function
 '''
 berlinpolicyjournal.com
@@ -20,7 +19,7 @@ class BerlinPolicyJournal(BasicNewsRecipe):
     publication_type = 'magazine'
     language = 'en_DE'
-    oldest_article = 75
+    oldest_article = 50
     max_articles_per_feed = 30
     simultaneous_downloads = 5
     no_stylesheets = True
@@ -29,14 +28,12 @@ class BerlinPolicyJournal(BasicNewsRecipe):
     conversion_options = {'smarten_punctuation' : True,
                           'publisher' : publisher}
-    # uncomment this to reduce file size
-    # compress_news_images = True
-    # compress_news_images_max_size = 16
     INDEX = 'http://berlinpolicyjournal.com/'
     masthead_url = INDEX + 'IP/wp-content/uploads/2015/04/logo_bpj_header.gif'
-    keep_only_tags = [dict(name='article')]
+    keep_only_tags = [
+        dict(name='article')
+    ]
     remove_tags = [
         dict(name='div', attrs={'class':['hidden', 'meta-count', 'meta-share']}),
@@ -44,44 +41,52 @@ class BerlinPolicyJournal(BasicNewsRecipe):
         dict(name='img', attrs={'alt':re.compile('_store_120px_width$')}),
         dict(name='img', attrs={'alt':re.compile('^bpj_app_')}),
         dict(name='img', attrs={'alt':re.compile('^BPJ-Montage_')}),
-        dict(name='footer'),
-        dict(name='br')
+        dict(name=['link', 'footer', 'br'])
     ]
     remove_attributes = ['sizes', 'width', 'height', 'align']
     extra_css = 'h1 {font-size: 1.6em; text-align: left} \
                  .entry-subtitle {font-style: italic; margin-bottom: 1em} \
+                 .wp-caption {margin-top: 1em} \
                  .wp-caption-text {font-size: 0.6em; margin-top: 0em}'
     def parse_index(self):
-        articles = {}
-        for i in range(1,5):
-            soup = self.index_to_soup(self.INDEX + 'page/' + str(i))
-            if i == 1:
-                img_div = soup.find('div', {'id':'text-2'})
-                self.cover_url = img_div.find('img', src=True)['src']
-            for div in soup.findAll('div', {'class':'post-box-big'}):
-                timestamp = time.strptime(div.find('time')['datetime'], '%Y-%m-%dT%H:%M:%S+00:00')
-                article_age = time.time() - time.mktime(timestamp)
-                if article_age <= self.oldest_article*24*3600:
-                    category = self.tag_to_string(div.findAll('a', {'rel':'category'})[-1])
-                    if category not in articles:
-                        articles[category] = []
-                    article_title = self.tag_to_string(div.find('h3', {'class':'entry-title'}).a)
-                    article_url = div.find('h3', {'class':'entry-title'}).a['href']
-                    article_date = unicode(time.strftime(' [%a, %d %b %H:%M]', timestamp))
-                    article_desc = self.tag_to_string(div.find('div', {'class':'i-summary'}).p)
-                    articles[category].append({'title':article_title,
-                                               'url':article_url,
-                                               'date':article_date,
-                                               'description':article_desc})
+        soup = self.index_to_soup(self.INDEX)
+        img_div = soup.find('div', {'id':'text-2'})
+        self.cover_url = img_div.find('img', src=True)['src']
+        menu = soup.find('ul', {'id':re.compile('menu-ip')})
+        submenus = menu.findAll('li', {'class':re.compile('item-has-children')})
+        mag = submenus[0].find('li')
+        mag_name = self.tag_to_string(mag.a)
+        mag_url = mag.a['href']
+        categories = [{'name':mag_name, 'url':mag_url, 'type':'magazine'}]
+        for blog in submenus[1].findAll('li'):
+            blog_name = self.tag_to_string(blog.a)
+            blog_url = blog.a['href']
+            categories.append({'name':blog_name, 'url':blog_url, 'type':'blog'})
         feeds = []
-        for feed in articles:
-            if '/' in feed:
-                feeds.insert(0, (feed, articles[feed]))
-            else:
-                feeds.append((feed, articles[feed]))
+        for cat in categories:
+            cat['articles'] = []
+            for i in ['1', '2']:
+                soup = self.index_to_soup(cat['url'] + '/page/' + i)
+                for div in soup.findAll('div', {'class':'post-box-big'}):
+                    timestamp = time.strptime(div.find('time')['datetime'][:15], '%Y-%m-%dT%H:%M')
+                    age = (time.time() - time.mktime(timestamp)) / (24 * 3600)
+                    if age > self.oldest_article and cat['type'] == 'blog':
+                        continue
+                    article_title = self.tag_to_string(div.find('h3', {'class':'entry-title'}).a)
+                    article_url = div.find('h3', {'class':'entry-title'}).a['href']
+                    article_date = unicode(time.strftime(' [%a, %d %b %H:%M]', timestamp))
+                    article_desc = self.tag_to_string(div.find('div', {'class':'i-summary'}).p)
+                    cat['articles'].append({'title':article_title,
+                                            'url':article_url,
+                                            'date':article_date,
+                                            'description':article_desc})
+                if soup.find('div', {'class':'pagination'}) is None:
+                    break
+            if cat['articles']:
+                feeds.append((cat['name'], cat['articles']))
         return feeds
     def postprocess_html(self, soup, first_fetch):
@@ -91,5 +96,5 @@ class BerlinPolicyJournal(BasicNewsRecipe):
             for entry in div.findAll('span', {'class':'entry-author'}):
                 authors = authors + entry.a.span.renderContents().strip() + ', '
             date = div.find('time').renderContents().strip()
-            div.replaceWith('<div>' + authors[:-2] + ' (' + date + ')<br/></div>')
+            div.replaceWith('<div>' + date + ' | ' + authors[:-2] + '<br/></div>')
         return soup