mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-06-23 15:30:45 -04:00
Update Berlin Policy Journal
This commit is contained in:
parent
3651c85635
commit
aaf6b52f36
@ -22,7 +22,7 @@ class BerlinPolicyJournal(BasicNewsRecipe):
|
||||
|
||||
oldest_article = 75
|
||||
max_articles_per_feed = 30
|
||||
simultaneous_downloads = 10
|
||||
simultaneous_downloads = 5
|
||||
no_stylesheets = True
|
||||
remove_javascript = True
|
||||
|
||||
@ -34,8 +34,6 @@ class BerlinPolicyJournal(BasicNewsRecipe):
|
||||
# compress_news_images_max_size = 16
|
||||
|
||||
INDEX = 'http://berlinpolicyjournal.com/'
|
||||
FRONTPAGE = INDEX + 'page/'
|
||||
cover_source = INDEX
|
||||
masthead_url = INDEX + 'IP/wp-content/uploads/2015/04/logo_bpj_header.gif'
|
||||
|
||||
keep_only_tags = [dict(name='article')]
|
||||
@ -43,9 +41,9 @@ class BerlinPolicyJournal(BasicNewsRecipe):
|
||||
remove_tags = [
|
||||
dict(name='div', attrs={'class':['hidden', 'meta-count', 'meta-share']}),
|
||||
dict(name='span', attrs={'class':'ava-auth'}),
|
||||
dict(name='img', attrs={'alt':re.compile("_store_120px_width$")}),
|
||||
dict(name='img', attrs={'alt':re.compile("^bpj_app_")}),
|
||||
dict(name='img', attrs={'alt':re.compile("^BPJ-Montage_")}),
|
||||
dict(name='img', attrs={'alt':re.compile('_store_120px_width$')}),
|
||||
dict(name='img', attrs={'alt':re.compile('^bpj_app_')}),
|
||||
dict(name='img', attrs={'alt':re.compile('^BPJ-Montage_')}),
|
||||
dict(name='footer'),
|
||||
dict(name='br')
|
||||
]
|
||||
@ -56,27 +54,24 @@ class BerlinPolicyJournal(BasicNewsRecipe):
|
||||
.entry-subtitle {font-style: italic; margin-bottom: 1em} \
|
||||
.wp-caption-text {font-size: 0.6em; margin-top: 0em}'
|
||||
|
||||
def get_cover_url(self):
    # Fetch the recipe's cover image: the Berlin Policy Journal front page
    # carries the current issue's cover inside the sidebar widget <div id="text-2">.
    # Stores the URL on self.cover_url (calibre convention) and returns it.
    index_soup = self.index_to_soup(self.cover_source)
    cover_widget = index_soup.find('div', id='text-2')
    self.cover_url = cover_widget.find('img', src=True)['src']
    return self.cover_url
|
||||
|
||||
def parse_index(self):
|
||||
articles = {}
|
||||
for i in range(1,5):
|
||||
soup = self.index_to_soup(self.FRONTPAGE + str(i))
|
||||
for div in soup.findAll('div', attrs={'class':'post-box-big'}):
|
||||
soup = self.index_to_soup(self.INDEX + 'page/' + str(i))
|
||||
if i == 1:
|
||||
img_div = soup.find('div', {'id':'text-2'})
|
||||
self.cover_url = img_div.find('img', src=True)['src']
|
||||
for div in soup.findAll('div', {'class':'post-box-big'}):
|
||||
timestamp = time.strptime(div.find('time')['datetime'], '%Y-%m-%dT%H:%M:%S+00:00')
|
||||
article_age = time.time() - time.mktime(timestamp)
|
||||
if article_age <= self.oldest_article*24*3600:
|
||||
category = self.tag_to_string(div.findAll('a', attrs={'rel':'category'})[-1])
|
||||
category = self.tag_to_string(div.findAll('a', {'rel':'category'})[-1])
|
||||
if category not in articles:
|
||||
articles[category] = []
|
||||
article_title = self.tag_to_string(div.find('h3', attrs={'class':'entry-title'}).a)
|
||||
article_url = div.find('h3', attrs={'class':'entry-title'}).a['href']
|
||||
article_title = self.tag_to_string(div.find('h3', {'class':'entry-title'}).a)
|
||||
article_url = div.find('h3', {'class':'entry-title'}).a['href']
|
||||
article_date = unicode(time.strftime(' [%a, %d %b %H:%M]', timestamp))
|
||||
article_desc = self.tag_to_string(div.find('div', attrs={'class':'i-summary'}).p)
|
||||
article_desc = self.tag_to_string(div.find('div', {'class':'i-summary'}).p)
|
||||
articles[category].append({'title':article_title,
|
||||
'url':article_url,
|
||||
'date':article_date,
|
||||
|
Loading…
x
Reference in New Issue
Block a user