Update Foreign Affairs

parent 36f1bd005f
commit 72f6c7f2be
@@ -1,5 +1,8 @@
+#!/usr/bin/env python2
 from calibre.web.feeds.news import BasicNewsRecipe
 import re
+import html5lib
+from lxml import html
 
 
 def select_form(form):
@@ -35,44 +38,20 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
     needs_subscription = True
 
     INDEX = 'http://www.foreignaffairs.com'
-    FRONTPAGE = 'http://www.foreignaffairs.com/magazine'
+    FRONTPAGE = INDEX + '/magazine'
 
-    remove_tags = [dict(name='svg')]
-    remove_tags_before = dict(name='div', attrs={'class': 'print-content'})
-    remove_tags_after = dict(name='div', attrs={'class': 'print-footer'})
+    keep_only_tags = [
+        dict(attrs={'class':lambda x: x and set(x.split()).intersection(set('article-header l-article-column'.split()))}),
+    ]
 
-    extra_css = '''
-                    body{font-family:verdana,arial,helvetica,geneva,sans-serif;}
-                    div.print-footer {font-size: x-small; color: #696969;}
-                    '''
-
     conversion_options = {'comments': description, 'tags': category, 'language': 'en',
                           'publisher': publisher}
 
-    temp_files = []
-
-    def get_cover_url(self):
-        soup = self.index_to_soup(self.FRONTPAGE)
-        div = soup.find('div', attrs={'class':'magazine-hero__image image_auto_width'})
-        img_url = div.find('img')['src']
-        return img_url  # The url includes the https:// as necessary
-
-    def get_print_url(self, url):
-        article_soup = self.index_to_soup(url.strip())
-
-        if article_soup is not None:
-            shortlink = article_soup.find('a', attrs={'class':re.compile(r'\bicon-print\b')})
-            if shortlink:
-                return shortlink['href']
-            else:
-                return url
-        else:
-            return url
-
     def parse_index(self):
-
         answer = []
         soup = self.index_to_soup(self.FRONTPAGE)
+        div = soup.find('div', attrs={'class':'magazine-hero__image image_auto_width'})
+        self.cover_url = div.find('img')['src']
         # get dates
         date = re.split('\s\|\s',self.tag_to_string(soup.head.title.string))[0]
         self.title = "Foreign Affairs ({})".format(date)
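Note: the new keep_only_tags entry keeps any tag whose class attribute shares at least one name with 'article-header' or 'l-article-column'. A minimal standalone sketch of that predicate, for illustration only and not part of the recipe (the function name and sample strings below are made up):

def has_wanted_class(class_attr, wanted=frozenset(['article-header', 'l-article-column'])):
    # class_attr is the raw class attribute string, e.g. "l-article-column clearfix";
    # it is treated as a whitespace-separated list of class names.
    return bool(class_attr) and bool(set(class_attr.split()) & wanted)

print(has_wanted_class('l-article-column clearfix'))  # True
print(has_wanted_class('sidebar'))                    # False
print(has_wanted_class(None))                         # False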
@@ -85,8 +64,7 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
            for article_block in sec.findAll('article'):
                if article_block.find('a') is not None:
                    title=self.tag_to_string(article_block.div.a.h2)
-                    article_url = article_block.div.a['href']
-                    url = self.get_print_url(article_url)
+                    url = article_block.div.a['href']
                    atr=article_block.findNext('p', attrs={'class': 'author'})
                    if atr is not None:
                        author=self.tag_to_string(atr)
@@ -102,6 +80,14 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
            answer.append((section, articles))
        return answer
 
+    def preprocess_raw_html(self, raw_html, url):
+        root = html5lib.parse(raw_html, treebuilder='lxml', namespaceHTMLElements=False).getroot()
+        for svg in tuple(root.iter('{*}svg')):
+            svg.getparent().remove(svg)
+        for meta in tuple(root.iter('{*}meta')):
+            meta.getparent().remove(meta)
+        return html.tostring(root)
+
     def preprocess_html(self, soup):
        for img in soup.findAll('img', attrs={'src': True}):
            if not img['src'].startswith('http'):
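Note: the added preprocess_raw_html re-parses each page with html5lib into an lxml tree and drops every svg and meta element before calibre processes the HTML. A self-contained sketch of the same stripping, runnable outside the recipe (assumes html5lib and lxml are installed; the sample markup is made up here):

import html5lib
from lxml import html

raw = '<html><head><meta charset="utf-8"></head><body><svg></svg><p>kept</p></body></html>'
root = html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False).getroot()
for name in ('svg', 'meta'):
    # '{*}' matches the tag in any namespace (or none), so this works however
    # html5lib chooses to namespace the parsed elements
    for el in tuple(root.iter('{*}' + name)):
        el.getparent().remove(el)
print(html.tostring(root))  # the <meta> and <svg> elements are gone, <p>kept</p> remains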
@@ -110,8 +96,6 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
         return soup
 
     def get_browser(self):
-        import html5lib
-        from lxml import html
         br = BasicNewsRecipe.get_browser(self)
         if self.username is not None and self.password is not None:
             # mechanize fails to parse the html correctly, so use html5lib to
@@ -125,6 +109,3 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
                 br.form['pass'] = self.password
                 br.submit()
         return br
-
-    def cleanup(self):
-        self.browser.open('https://www.foreignaffairs.com/user/logout')