mirror of
https://github.com/kovidgoyal/calibre.git
synced 2025-07-09 03:04:10 -04:00
Fix #4256 (HBR Retrieval) and add command line option to disable recipe download
This commit is contained in:
parent
c9cf600295
commit
136c485114
@ -11,17 +11,15 @@ class HBR(BasicNewsRecipe):
|
|||||||
language = 'en'
|
language = 'en'
|
||||||
no_stylesheets = True
|
no_stylesheets = True
|
||||||
|
|
||||||
LOGIN_URL = 'http://hbr.harvardbusiness.org/login?request_url=/'
|
LOGIN_URL = 'http://hbr.org/login?request_url=/'
|
||||||
INDEX = 'http://hbr.harvardbusiness.org/current'
|
INDEX = 'http://hbr.org/current'
|
||||||
|
|
||||||
keep_only_tags = [dict(name='div', id='content')]
|
|
||||||
remove_tags = [
|
|
||||||
dict(id=['articleDate', 'subscriptionModule', 'errorArea',
|
|
||||||
'feedbackForm', 'relatedModule', 'articleSecondaryModule',
|
|
||||||
'contentRight', 'summaryLink']),
|
|
||||||
dict(name='form'),
|
|
||||||
]
|
|
||||||
|
|
||||||
|
keep_only_tags = [dict(name='div', id='pageContainer')]
|
||||||
|
remove_tags = [dict(id=['mastheadContainer', 'magazineHeadline',
|
||||||
|
'articleToolbarTopRD', 'pageRightSubColumn', 'pageRightColumn',
|
||||||
|
'todayOnHBRListWidget', 'mostWidget', 'keepUpWithHBR',
|
||||||
|
'mailingListTout', 'partnerCenter', 'pageFooter']),
|
||||||
|
dict(name='iframe')]
|
||||||
extra_css = '''
|
extra_css = '''
|
||||||
a {font-family:Georgia,"Times New Roman",Times,serif; font-style:italic; color:#000000; }
|
a {font-family:Georgia,"Times New Roman",Times,serif; font-style:italic; color:#000000; }
|
||||||
.article{font-family:Georgia,"Times New Roman",Times,serif; font-size: xx-small;}
|
.article{font-family:Georgia,"Times New Roman",Times,serif; font-size: xx-small;}
|
||||||
@ -34,14 +32,14 @@ class HBR(BasicNewsRecipe):
|
|||||||
def get_browser(self):
|
def get_browser(self):
|
||||||
br = BasicNewsRecipe.get_browser(self)
|
br = BasicNewsRecipe.get_browser(self)
|
||||||
br.open(self.LOGIN_URL)
|
br.open(self.LOGIN_URL)
|
||||||
br.select_form(nr=0)
|
br.select_form(name='signInForm')
|
||||||
br['ssousername'] = self.username
|
br['signInForm:username'] = self.username
|
||||||
br['password'] = self.password
|
br['signInForm:password'] = self.password
|
||||||
raw = br.submit().read()
|
raw = br.submit().read()
|
||||||
if 'My Account' not in raw:
|
if 'My Account' not in raw:
|
||||||
raise Exception('Failed to login, are you sure your username and password are correct?')
|
raise Exception('Failed to login, are you sure your username and password are correct?')
|
||||||
self.logout_url = None
|
self.logout_url = None
|
||||||
link = br.find_link(text='(sign out)')
|
link = br.find_link(text='Sign out')
|
||||||
if link:
|
if link:
|
||||||
self.logout_url = link.absolute_url
|
self.logout_url = link.absolute_url
|
||||||
return br
|
return br
|
||||||
@ -54,56 +52,70 @@ class HBR(BasicNewsRecipe):
|
|||||||
if url.endswith('/ar/1'):
|
if url.endswith('/ar/1'):
|
||||||
return url[:-1]+'pr'
|
return url[:-1]+'pr'
|
||||||
|
|
||||||
def get_features(self, soup):
|
|
||||||
div = soup.find('div', id='issueFeatures')
|
|
||||||
for li in div.findAll('li'):
|
|
||||||
a = li.find('a', href=True)
|
|
||||||
url = 'http://hbr.harvardbusiness.org'+a['href']
|
|
||||||
url = self.map_url(url)
|
|
||||||
if not url:
|
|
||||||
continue
|
|
||||||
title = self.tag_to_string(a)
|
|
||||||
p = li.find('p')
|
|
||||||
desc = ''
|
|
||||||
if p is not None:
|
|
||||||
desc = self.tag_to_string(p)
|
|
||||||
yield {'title':title, 'url':url, 'description':desc}
|
|
||||||
|
|
||||||
def get_departments(self, soup):
|
def hbr_get_toc(self):
|
||||||
div = soup.find('div', id='issueDepartmentsContent')
|
soup = self.index_to_soup(self.INDEX)
|
||||||
for h4 in div.findAll('h4'):
|
url = soup.find('a', text=lambda t:'Full Table of Contents' in t).parent.get('href')
|
||||||
feed = self.tag_to_string(h4)
|
return self.index_to_soup('http://hbr.org'+url)
|
||||||
articles = []
|
|
||||||
ul = h4.findNextSibling('ul')
|
def hbr_parse_section(self, container, feeds):
|
||||||
for li in ul.findAll('li'):
|
current_section = None
|
||||||
a = li.find('a', href=True)
|
current_articles = []
|
||||||
url = 'http://hbr.harvardbusiness.org'+a['href']
|
for x in container.findAll(name=['li', 'h3', 'h4']):
|
||||||
url = self.map_url(url)
|
if x.name in ['h3', 'h4'] and not x.findAll(True):
|
||||||
if not url:
|
if current_section and current_articles:
|
||||||
continue
|
feeds.append((current_section, current_articles))
|
||||||
title = self.tag_to_string(a)
|
current_section = self.tag_to_string(x)
|
||||||
p = li.find('p')
|
current_articles = []
|
||||||
desc = ''
|
self.log('\tFound section:', current_section)
|
||||||
if p is not None:
|
if x.name == 'li':
|
||||||
desc = self.tag_to_string(p)
|
a = x.find('a', href=True)
|
||||||
articles.append({'title':title, 'url':url, 'description':desc})
|
if a is not None:
|
||||||
yield [feed, articles]
|
title = self.tag_to_string(a)
|
||||||
|
url = a.get('href')
|
||||||
|
if '/ar/' not in url:
|
||||||
|
continue
|
||||||
|
if url.startswith('/'):
|
||||||
|
url = 'http://hbr.org'+url
|
||||||
|
url = self.map_url(url)
|
||||||
|
p = x.find('p')
|
||||||
|
desc = ''
|
||||||
|
if p is not None:
|
||||||
|
desc = self.tag_to_string(p)
|
||||||
|
if not title or not url:
|
||||||
|
continue
|
||||||
|
self.log('\t\tFound article:', title)
|
||||||
|
self.log('\t\t\t', url)
|
||||||
|
self.log('\t\t\t', desc)
|
||||||
|
current_articles.append({'title':title, 'url':url,
|
||||||
|
'description':desc, 'date':''})
|
||||||
|
if current_section and current_articles:
|
||||||
|
feeds.append((current_section, current_articles))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def hbr_parse_toc(self, soup):
|
||||||
|
feeds = []
|
||||||
|
features = soup.find(id='issueFeaturesContent')
|
||||||
|
self.hbr_parse_section(features, feeds)
|
||||||
|
departments = soup.find(id='issueDepartments')
|
||||||
|
self.hbr_parse_section(departments, feeds)
|
||||||
|
return feeds
|
||||||
|
|
||||||
|
|
||||||
def parse_index(self):
|
def parse_index(self):
|
||||||
soup = self.index_to_soup(self.INDEX)
|
soup = self.hbr_get_toc()
|
||||||
feeds = []
|
feeds = self.hbr_parse_toc(soup)
|
||||||
feeds.append(('Features', list(self.get_features(soup))))
|
|
||||||
feeds.extend(self.get_departments(soup))
|
|
||||||
return feeds
|
return feeds
|
||||||
|
|
||||||
def get_cover_url(self):
|
def get_cover_url(self):
|
||||||
cover_url = None
|
cover_url = None
|
||||||
index = 'http://hbr.harvardbusiness.org/current'
|
index = 'http://hbr.org/current'
|
||||||
soup = self.index_to_soup(index)
|
soup = self.index_to_soup(index)
|
||||||
link_item = soup.find('img', alt=re.compile("HBR Cover Image"), src=True)
|
link_item = soup.find('img', alt=re.compile("Current Issue"), src=True)
|
||||||
|
|
||||||
if link_item:
|
if link_item:
|
||||||
cover_url = 'http://hbr.harvardbusiness.org' + link_item['src']
|
cover_url = 'http://hbr.org' + link_item['src']
|
||||||
|
|
||||||
return cover_url
|
return cover_url
|
||||||
|
|
||||||
|
@ -38,6 +38,9 @@ class RecipeInput(InputFormatPlugin):
|
|||||||
OptionRecommendation(name='password', recommended_value=None,
|
OptionRecommendation(name='password', recommended_value=None,
|
||||||
help=_('Password for sites that require a login to access '
|
help=_('Password for sites that require a login to access '
|
||||||
'content.')),
|
'content.')),
|
||||||
|
OptionRecommendation(name='dont_download_recipe',
|
||||||
|
recommended_value=False,
|
||||||
|
help=_('Download latest version of builtin recipes')),
|
||||||
OptionRecommendation(name='lrf', recommended_value=False,
|
OptionRecommendation(name='lrf', recommended_value=False,
|
||||||
help='Optimize fetching for subsequent conversion to LRF.'),
|
help='Optimize fetching for subsequent conversion to LRF.'),
|
||||||
])
|
])
|
||||||
@ -52,7 +55,8 @@ class RecipeInput(InputFormatPlugin):
|
|||||||
else:
|
else:
|
||||||
title = getattr(opts, 'original_recipe_input_arg', recipe_or_file)
|
title = getattr(opts, 'original_recipe_input_arg', recipe_or_file)
|
||||||
title = os.path.basename(title).rpartition('.')[0]
|
title = os.path.basename(title).rpartition('.')[0]
|
||||||
raw = get_builtin_recipe_by_title(title, log=log, download_recipe=True)
|
raw = get_builtin_recipe_by_title(title, log=log,
|
||||||
|
download_recipe=not opts.dont_download_recipe)
|
||||||
builtin = False
|
builtin = False
|
||||||
try:
|
try:
|
||||||
recipe = compile_recipe(raw)
|
recipe = compile_recipe(raw)
|
||||||
|
Loading…
x
Reference in New Issue
Block a user