Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

commit 807dde4608
Merge branch 'kovidgoyal:master' into tolino
@@ -46,13 +46,21 @@ class ancientegypt(BasicNewsRecipe):
             exp.name = 'p'
         return soup

+    recipe_specific_options = {
+        'issue': {
+            'short': 'Enter the Issue Number you want to download ',
+            'long': 'For example, 136'
+        }
+    }
+
     def parse_index(self):
         soup = self.index_to_soup('https://the-past.com/category/magazines/ae/')
         art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
         url = art.h2.a['href']

-        # for past editions, add url
-        # url = ''
+        d = self.recipe_specific_options.get('issue')
+        if d and isinstance(d, str):
+            url = 'https://the-past.com/magazines/ae/ancient-egypt-magazine-' + d + '/'

         issue = self.index_to_soup(url)
         ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
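Every hunk in this commit follows the same shape: declare a recipe_specific_options dict on the recipe class, then read the value back in parse_index (or __init__) behind a `d and isinstance(d, str)` guard, since the option may be unset or arrive as a non-string. A minimal sketch of the pattern, using a hypothetical magazine URL scheme rather than any recipe in this commit:

from calibre.web.feeds.news import BasicNewsRecipe


class example(BasicNewsRecipe):
    title = 'Example Past-Issue Recipe'

    recipe_specific_options = {
        'issue': {
            'short': 'Enter the Issue Number you want to download',
            'long': 'For example, 136'
        }
    }

    def parse_index(self):
        url = 'https://example.com/latest/'  # hypothetical default index
        d = self.recipe_specific_options.get('issue')
        if d and isinstance(d, str):
            # the user asked for a back issue; rebuild the index URL
            url = 'https://example.com/issues/' + d + '/'
        self.log('Fetching', url)
        return [('Articles', [{'title': 'Placeholder', 'url': url}])]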
@@ -199,7 +199,9 @@ class TheAtlantic(BasicNewsRecipe):
             self.cover_url = img['src']
         current_section, current_articles = 'Cover Story', []
         feeds = []
-        for x in soup.findAll(**prefix_classes('TocFeaturedSection_heading__ TocSection_heading__ TocHeroGridItem_hedLink__ TocGridItem_hedLink__')):
+        for x in soup.findAll(**prefix_classes(
+            'TocFeaturedSection_heading__ TocSection_heading__ TocHeroGridItem_hedLink__ TocGridItem_hedLink__ RiverGridItem_hedLink__'
+        )):
             cls = x['class']
             if not isinstance(cls, str):
                 cls = ' '.join(cls)
@@ -5,9 +5,6 @@ from datetime import datetime
 from calibre.ebooks.BeautifulSoup import BeautifulSoup
 from calibre.web.feeds.news import BasicNewsRecipe, classes

-# https://www.bloomberg.com/magazine/businessweek/24_12
-# Set past_edition to edition id, which is '24_12'.
-past_edition = None

 def get_contents(x):
     if x == '':
@@ -47,7 +44,7 @@ def get_contents(x):

 class Bloomberg(BasicNewsRecipe):
     title = 'Bloomberg Businessweek'
-    language = 'en'
+    language = 'en_US'
     __author__ = 'unkn0wn'
     no_stylesheets = True
     remove_attributes = ['style', 'height', 'width']
@@ -60,6 +57,13 @@ class Bloomberg(BasicNewsRecipe):
     )
     remove_empty_feeds = True

+    recipe_specific_options = {
+        'date': {
+            'short': 'The ID of the edition to download (YY_XX format)',
+            'long': 'For example, 24_17\nHint: Edition ID can be found at the end of its URL'
+        }
+    }
+
     remove_tags = [
         dict(name=['button', 'svg', 'meta']),
         dict(name='div', attrs={'id':['bb-that', 'bb-nav']}),
@@ -82,7 +86,8 @@ class Bloomberg(BasicNewsRecipe):
         inx = 'https://cdn-mobapi.bloomberg.com'
         sec = self.index_to_soup(inx + '/wssmobile/v1/bw/news/list?limit=1', raw=True)
         id = json.loads(sec)['magazines'][0]['id']
-        if past_edition:
+        past_edition = self.recipe_specific_options.get('date')
+        if past_edition and isinstance(past_edition, str):
             id = past_edition
         edit = self.index_to_soup(inx + '/wssmobile/v1/bw/news/week/' + id, raw=True)
         d = json.loads(edit)
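The Businessweek hunks replace a module-level past_edition constant with an option lookup at download time. A sketch of the resulting edition selection, assuming only the API listing shape visible in the hunk ({'magazines': [{'id': ...}]}):

import json


def pick_edition_id(raw_list_json, past_edition=None):
    # Newest edition id from the API listing, unless the user supplied one.
    if past_edition and isinstance(past_edition, str):
        return past_edition
    return json.loads(raw_list_json)['magazines'][0]['id']


print(pick_edition_id('{"magazines": [{"id": "24_17"}]}'))            # 24_17
print(pick_edition_id('{"magazines": [{"id": "24_17"}]}', '24_12'))   # 24_12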
@@ -55,7 +55,7 @@ class Bloomberg(BasicNewsRecipe):
         'Bloomberg delivers business and markets news, data, analysis, and video'
         ' to the world, featuring stories from Businessweek and Bloomberg News.'
     )
-    oldest_article = 1 # days
+    oldest_article = 1.2 # days
     resolve_internal_links = True
     remove_empty_feeds = True
     cover_url = 'https://assets.bwbx.io/images/users/iqjWHBFdfxIU/ivUxvlPidC3M/v0/600x-1.jpg'
@@ -78,7 +78,18 @@ class Bloomberg(BasicNewsRecipe):
         .news-figure-credit {font-size:small; text-align:center; color:#202020;}
     '''

+    recipe_specific_options = {
+        'days': {
+            'short': 'Oldest article to download from this news source. In days ',
+            'long': 'For example, 0.5, gives you articles for the past 12 hours',
+            'default': str(oldest_article),
+        }
+    }
+
     def parse_index(self):
+        d = self.recipe_specific_options.get('days')
+        if d and isinstance(d, str):
+            self.oldest_article = float(d)
         inx = 'https://cdn-mobapi.bloomberg.com'
         sec = self.index_to_soup(inx + '/wssmobile/v1/navigation/bloomberg_app/search-v2', raw=True)
         sec_data = json.loads(sec)['searchNav']
@@ -4,12 +4,6 @@ from datetime import datetime
 from calibre.web.feeds.news import BasicNewsRecipe
 from html5_parser import parse

-today = datetime.today().strftime('%d-%m-%Y')
-
-# today = '20-09-2023'
-
-day, month, year = (int(x) for x in today.split('-'))
-dt = datetime(year, month, day)

 class BusinessStandardPrint(BasicNewsRecipe):
     title = 'Business Standard Print Edition'
@@ -18,18 +12,12 @@ class BusinessStandardPrint(BasicNewsRecipe):
     language = 'en_IN'
     masthead_url = 'https://bsmedia.business-standard.com/include/_mod/site/html5/images/business-standard-logo.png'
     encoding = 'utf-8'
-    timefmt = ' [' + dt.strftime('%b %d, %Y') + ']'
     resolve_internal_links = True
     remove_empty_feeds = True

     no_stylesheets = True
     remove_javascript = True
-    remove_attributes = ['width', 'height', 'float', 'style']
-
-    def __init__(self, *args, **kwargs):
-        BasicNewsRecipe.__init__(self, *args, **kwargs)
-        if self.output_profile.short_name.startswith('kindle'):
-            self.title = 'Business Standard ' + dt.strftime('%b %d, %Y')
+    remove_attributes = ['width', 'height', 'style']

     def get_browser(self):
         return BasicNewsRecipe.get_browser(self, user_agent='common_words/based')
@@ -40,16 +28,35 @@ class BusinessStandardPrint(BasicNewsRecipe):

     extra_css = '''
         img {display:block; margin:0 auto;}
+        .sub { font-style:italic; color:#202020; }
         .auth, .cat { font-size:small; color:#202020; }
         .cap { font-size:small; text-align:center; }
     '''

+    recipe_specific_options = {
+        'date': {
+            'short': 'The date of the print edition to download (DD-MM-YYYY format)',
+            'long': 'For example, 20-09-2023'
+        }
+    }
+
     def get_cover_url(self):
-        soup = self.index_to_soup('https://www.magzter.com/IN/Business-Standard-Private-Ltd/Business-Standard/Newspaper/')
-        for citem in soup.findAll('meta', content=lambda s: s and s.endswith('view/3.jpg')):
-            return citem['content']
+        d = self.recipe_specific_options.get('date')
+        if not (d and isinstance(d, str)):
+            soup = self.index_to_soup('https://www.magzter.com/IN/Business-Standard-Private-Ltd/Business-Standard/Newspaper/')
+            for citem in soup.findAll('meta', content=lambda s: s and s.endswith('view/3.jpg')):
+                return citem['content']

     def parse_index(self):
+        today = datetime.today().strftime('%d-%m-%Y')
+        d = self.recipe_specific_options.get('date')
+        if d and isinstance(d, str):
+            today = d
+
+        day, month, year = (int(x) for x in today.split('-'))
+        dt = datetime(year, month, day)
+        self.timefmt = ' [' + dt.strftime('%b %d, %Y') + ']'
+
         if dt.weekday() == 6:
             self.log.warn(
                 'Business Standard Does Not Have A Print Publication On Sunday. The Reports'
@@ -97,10 +104,10 @@ class BusinessStandardPrint(BasicNewsRecipe):

         if 'defaultArticleCat' in data and data['defaultArticleCat'] is not None:
             if 'h1_tag' in data['defaultArticleCat'] and data['defaultArticleCat']['h1_tag'] is not None:
-                cat = '<div><p class="cat">' + data['defaultArticleCat']['h1_tag'] + '</p></div>'
+                cat = '<div class="cat">' + data['defaultArticleCat']['h1_tag'] + '</div>'

         if 'metaDescription' in data and data['metaDescription'] is not None:
-            subhead = '<h3>' + data['metaDescription'] + '</h3>'
+            subhead = '<p class="sub">' + data['metaDescription'] + '</p>'
             self.art_desc = data['metaDescription']

         date = (datetime.fromtimestamp(int(data['publishDate']))).strftime('%b %d, %Y | %I:%M %p')
@@ -120,6 +127,13 @@ class BusinessStandardPrint(BasicNewsRecipe):
             if 'alt_text' in data['featuredImageObj']:
                 caption = '<span>' + data['featuredImageObj']['alt_text'] + '</span></p>'

-        body = data['htmlContent']
+        body = data['htmlContent'].replace('<br />\r\n\t\t ', '<br>')

-        return '<html><body>' + cat + title + subhead + auth + lede + caption + '<div><p></p>' + body + '</div></body></html>'
+        return '<html><body>' + cat + title + subhead + auth + lede + caption + '<div><br>' + body + '</div></body></html>'
+
+    def preprocess_html(self, soup):
+        for img in soup.findAll('img'):
+            img.attrs = {'src': img.get('src', '')}
+        for x in soup.findAll('div'):
+            x.attrs = {'class': x.get('class', '')}
+        return soup
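The effect of the Business Standard change is that the edition date is no longer fixed at import time; it is resolved per run inside parse_index. A standalone sketch of that date handling (the function wrapper is illustrative, not part of the recipe):

from datetime import datetime


def resolve_edition(d=None):
    # d is the optional 'date' option in DD-MM-YYYY form
    today = d if d and isinstance(d, str) else datetime.today().strftime('%d-%m-%Y')
    day, month, year = (int(x) for x in today.split('-'))
    dt = datetime(year, month, day)
    if dt.weekday() == 6:
        print('Business Standard has no print publication on Sunday.')
    return dt


print(resolve_edition('20-09-2023').strftime('%b %d, %Y'))  # Sep 20, 2023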
@@ -215,7 +215,7 @@ class Economist(BasicNewsRecipe):

    def publication_date(self):
        edition_date = self.recipe_specific_options.get('date')
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            return parse_only_date(edition_date, as_utc=False)
        url = self.browser.open("https://www.economist.com/printedition").geturl()
        return parse_only_date(url.split("/")[-1], as_utc=False)
@@ -245,7 +245,7 @@ class Economist(BasicNewsRecipe):
            'operationName': 'LatestWeeklyAutoEditionQuery',
            'variables': '{"ref":"/content/d06tg8j85rifiq3oo544c6b9j61dno2n"}',
        }
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            url = 'https://www.economist.com/weeklyedition/' + edition_date
            soup = self.index_to_soup(url)
            script_tag = soup.find("script", id="__NEXT_DATA__")
@@ -268,7 +268,7 @@ class Economist(BasicNewsRecipe):

    def economist_parse_index(self, raw):
        edition_date = self.recipe_specific_options.get('date')
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            data = json.loads(raw)['data']['section']
        else:
            data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
@@ -336,7 +336,7 @@ class Economist(BasicNewsRecipe):
    def parse_index(self):
        edition_date = self.recipe_specific_options.get('date')
        # return self.economist_test_article()
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            url = 'https://www.economist.com/weeklyedition/' + edition_date
            self.timefmt = ' [' + edition_date + ']'
        else:
@@ -423,10 +423,11 @@ class Economist(BasicNewsRecipe):
                x.set('style', 'color:#404040;')
        raw = etree.tostring(root, encoding='unicode')
        return raw
+
    def parse_index_from_printedition(self):
        # return self.economist_test_article()
        edition_date = self.recipe_specific_options.get('date')
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            url = 'https://www.economist.com/weeklyedition/' + edition_date
            self.timefmt = ' [' + edition_date + ']'
        else:
@@ -215,7 +215,7 @@ class Economist(BasicNewsRecipe):

    def publication_date(self):
        edition_date = self.recipe_specific_options.get('date')
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            return parse_only_date(edition_date, as_utc=False)
        url = self.browser.open("https://www.economist.com/printedition").geturl()
        return parse_only_date(url.split("/")[-1], as_utc=False)
@@ -245,7 +245,7 @@ class Economist(BasicNewsRecipe):
            'operationName': 'LatestWeeklyAutoEditionQuery',
            'variables': '{"ref":"/content/d06tg8j85rifiq3oo544c6b9j61dno2n"}',
        }
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            url = 'https://www.economist.com/weeklyedition/' + edition_date
            soup = self.index_to_soup(url)
            script_tag = soup.find("script", id="__NEXT_DATA__")
@@ -268,7 +268,7 @@ class Economist(BasicNewsRecipe):

    def economist_parse_index(self, raw):
        edition_date = self.recipe_specific_options.get('date')
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            data = json.loads(raw)['data']['section']
        else:
            data = json.loads(raw)['data']['canonical']['hasPart']['parts'][0]
@@ -336,7 +336,7 @@ class Economist(BasicNewsRecipe):
    def parse_index(self):
        edition_date = self.recipe_specific_options.get('date')
        # return self.economist_test_article()
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            url = 'https://www.economist.com/weeklyedition/' + edition_date
            self.timefmt = ' [' + edition_date + ']'
        else:
@@ -423,10 +423,11 @@ class Economist(BasicNewsRecipe):
                x.set('style', 'color:#404040;')
        raw = etree.tostring(root, encoding='unicode')
        return raw
+
    def parse_index_from_printedition(self):
        # return self.economist_test_article()
        edition_date = self.recipe_specific_options.get('date')
-        if edition_date:
+        if edition_date and isinstance(edition_date, str):
            url = 'https://www.economist.com/weeklyedition/' + edition_date
            self.timefmt = ' [' + edition_date + ']'
        else:
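Both Economist recipe files get the same one-line hardening in four places: the option value is only used when it is a non-empty string. A sketch of why the guard matters (the URL concatenation below it would raise TypeError on a non-string value):

def weekly_url(edition_date):
    # Guarded: only a non-empty string is treated as a requested edition.
    if edition_date and isinstance(edition_date, str):
        return 'https://www.economist.com/weeklyedition/' + edition_date
    return 'https://www.economist.com/printedition'


print(weekly_url('2024-08-10'))  # explicit edition
print(weekly_url(None))          # falls back to the latest print edition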
@@ -54,11 +54,24 @@ class Harpers(BasicNewsRecipe):
                img['src'] = src.split()[0]
        return soup

+    recipe_specific_options = {
+        'date': {
+            'short': 'The date of the edition to download (YYYY/MM format)',
+            'long': 'For example, 2023/08',
+        }
+    }
+
    def parse_index(self):
        issues_soup = self.index_to_soup("https://harpers.org/issues/")
        a_ele = issues_soup.select_one("div.issue-card a")
        self.timefmt = ' [' + self.tag_to_string(a_ele.find(attrs={'class':'issue-title'})) + ']'
        url = a_ele['href']
+
+        edition = self.recipe_specific_options.get('date')
+        if edition and isinstance(edition, str):
+            url = 'https://harpers.org/archive/' + edition
+            self.timefmt = ' [' + edition + ']'
+
        soup = self.index_to_soup(url)
        cov_div = soup.find('div', attrs={'class':'issue-cover'})
        if cov_div:
@@ -32,7 +32,8 @@ class TheHindu(BasicNewsRecipe):
    recipe_specific_options = {
        'location': {
            'short': 'The name of the local edition',
-            'long': 'If The Hindu is available in your local town/city,\nset this to your location, for example, hyderabad'
+            'long': 'If The Hindu is available in your local town/city,\nset this to your location, for example, hyderabad',
+            'default': 'international'
        },
        'date': {
            'short': 'The date of the edition to download (YYYY-MM-DD format)',
@@ -60,47 +61,32 @@ class TheHindu(BasicNewsRecipe):
        return soup

    def parse_index(self):
-        mag_url = None
-        local_edition = self.recipe_specific_options.get('location')
-        if local_edition:
-            local_edition = 'th_' + local_edition
+        local_edition = 'th_international'
+        d = self.recipe_specific_options.get('location')
+        if d and isinstance(d, str):
+            local_edition = 'th_' + d

        past_edition = self.recipe_specific_options.get('date')

        dt = date.today()
-        if past_edition:
+        if past_edition and isinstance(past_edition, str):
            year, month, day = (int(x) for x in past_edition.split('-'))
            dt = date(year, month, day)

-        is_monday = dt.weekday() == 0
-        is_friday = dt.weekday() == 4
-        is_saturday = dt.weekday() == 5
-        is_sunday = dt.weekday() == 6
-
-        if local_edition or past_edition:
-            if local_edition is None:
-                local_edition = 'th_chennai'
-            today = date.today().strftime('%Y-%m-%d')
-            if past_edition:
-                today = past_edition
-                self.log('Downloading past edition of', local_edition + ' from ' + today)
-            url = absurl('/todays-paper/' + today + '/' + local_edition + '/')
-            if is_monday:
-                mag_url = url + '?supplement=' + local_edition + '-epbs'
-            if is_saturday:
-                mag_url = url + '?supplement=' + local_edition + '-mp'
-            if is_sunday:
-                mag_url = url + '?supplement=' + local_edition + '-sm'
-        else:
-            url = 'https://www.thehindu.com/todays-paper/'
-            if is_monday:
-                mag_url = url + '?supplement=th_chennai-epbs'
-            if is_friday:
-                mag_url = url + '?supplement=th_chennai-fr'
-            if is_saturday:
-                mag_url = url + '?supplement=th_chennai-mp'
-            if is_sunday:
-                mag_url = url + '?supplement=th_chennai-sm'
+        today = dt.strftime('%Y-%m-%d')
+
+        self.log('Downloading The Hindu, ' + local_edition[3:] + ' edition, ' + today)
+        url = absurl('/todays-paper/' + today + '/' + local_edition + '/')
+
+        mag_url = None
+        if dt.weekday() == 0:
+            mag_url = url + '?supplement=' + local_edition + '-epbs'
+        if dt.weekday() == 4:
+            mag_url = url + '?supplement=' + local_edition + '-fr'
+        if dt.weekday() == 5:
+            mag_url = url + '?supplement=' + local_edition + '-mp'
+        if dt.weekday() == 6:
+            mag_url = url + '?supplement=' + local_edition + '-sm'

        raw = self.index_to_soup(url, raw=True)
        soup = self.index_to_soup(raw)
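The rewritten parse_index replaces the is_monday/is_friday flags and the two-branch URL logic with a single path: one URL template plus a weekday-to-supplement mapping. A sketch of that mapping, using the suffixes from the hunk above (the example URL is illustrative):

# Weekday index (Monday == 0) -> supplement suffix, per the hunk above.
SUPPLEMENTS = {0: '-epbs', 4: '-fr', 5: '-mp', 6: '-sm'}


def supplement_url(url, local_edition, weekday):
    suffix = SUPPLEMENTS.get(weekday)
    return url + '?supplement=' + local_edition + suffix if suffix else None


base = 'https://www.thehindu.com/todays-paper/2024-08-09/th_international/'
print(supplement_url(base, 'th_international', 4))  # Friday supplement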
@@ -139,7 +125,7 @@ class TheHindu(BasicNewsRecipe):
                    title = item['articleheadline']
                    url = absurl(item['href'])
                    desc = 'Page no.' + item['pageno'] + ' | ' + item['teaser_text'] or ''
-                    self.log('\t', title, '\n\t\t', url)
+                    self.log(' ', title, '\n\t', url)
                    feeds_dict[section].append({"title": title, "url": url, "description": desc})
            return [(section, articles) for section, articles in feeds_dict.items()]
        else:
@@ -19,6 +19,20 @@ class lexfridman(BasicNewsRecipe):
    timefmt = ' [%b, %Y]'
    cover_url = 'https://i.scdn.co/image/ab6765630000ba8a563ebb538d297875b10114b7'

+    recipe_specific_options = {
+        'days': {
+            'short': 'Oldest article to download from this news source. In days ',
+            'long': 'For example, 0.5, gives you articles from the past 12 hours',
+            'default': str(oldest_article)
+        }
+    }
+
+    def __init__(self, *args, **kwargs):
+        BasicNewsRecipe.__init__(self, *args, **kwargs)
+        d = self.recipe_specific_options.get('days')
+        if d and isinstance(d, str):
+            self.oldest_article = float(d)
+
    extra_css = '''
        .ts-name { font-weight:bold; }
        .ts-timestamp { font-size:small; }
@@ -45,13 +45,21 @@ class milthist(BasicNewsRecipe):
            exp.name = 'p'
        return soup

+    recipe_specific_options = {
+        'issue': {
+            'short': 'Enter the Issue Number you want to download ',
+            'long': 'For example, 136'
+        }
+    }
+
    def parse_index(self):
        soup = self.index_to_soup('https://the-past.com/category/magazines/mhm/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']

-        # for past editions, add url
-        # url = ''
+        d = self.recipe_specific_options.get('issue')
+        if d and isinstance(d, str):
+            url = 'https://the-past.com/magazines/military-history-matters-' + d + '/'

        issue = self.index_to_soup(url)
        ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
@@ -45,13 +45,21 @@ class minerva(BasicNewsRecipe):
            exp.name = 'p'
        return soup

+    recipe_specific_options = {
+        'issue': {
+            'short': 'Enter the Issue Number you want to download ',
+            'long': 'For example, 136'
+        }
+    }
+
    def parse_index(self):
        soup = self.index_to_soup('https://the-past.com/category/magazines/minerva/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']

-        # for past editions, add url
-        # url = ''
+        d = self.recipe_specific_options.get('issue')
+        if d and isinstance(d, str):
+            url = 'https://the-past.com/magazines/minerva-magazine-' + d + '/'

        issue = self.index_to_soup(url)
        ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
@@ -10,9 +10,6 @@ from calibre import prepare_string_for_xml as escape
 from calibre.utils.iso8601 import parse_iso8601
 from calibre.web.feeds.news import BasicNewsRecipe

-edition = date.today().strftime('%B-%Y')
-
-# edition = 'March-2023'

 def classes(classes):
     q = frozenset(classes.split(' '))
@@ -175,7 +172,18 @@ class NatGeo(BasicNewsRecipe):
        .auth, .time, .sub { font-size:small; color:#5c5c5c; }
    '''

+    recipe_specific_options = {
+        'date': {
+            'short': 'The date of the edition to download (Month-YYYY format)',
+            'long': 'For example, March-2023'
+        }
+    }
+
    def parse_index(self):
+        edition = date.today().strftime('%B-%Y')
+        d = self.recipe_specific_options.get('date')
+        if d and isinstance(d, str):
+            edition = d
        url = 'https://www.nationalgeographic.com/magazine/issue/' + edition.lower()
        self.log('Downloading ', url)
        self.timefmt = ' [' + edition + ']'
@@ -185,7 +193,7 @@ class NatGeo(BasicNewsRecipe):
        self.cover_url = soup.find('meta', attrs={'property':'og:image'})['content'].split('?')[0] + '?w=1000'

        name = soup.find(attrs={'class':lambda x: x and 'Header__Description' in x.split()})
-        self.title = 'National Geographic ' + self.tag_to_string(name)
+        # self.title = 'National Geographic ' + self.tag_to_string(name)
        ans = {}
        if photoart := soup.find(attrs={'class':lambda x: x and 'BgImagePromo__Container__Text__Link' in x.split()}):
            section = 'Photo Essay'
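In National Geographic the edition slug likewise moves from module level into parse_index; the default is the current month in Month-YYYY form, lower-cased into the issue URL. A sketch of the resulting URL construction:

from datetime import date

# Default edition slug, e.g. 'March-2023'; a 'date' option would override it.
edition = date.today().strftime('%B-%Y')
url = 'https://www.nationalgeographic.com/magazine/issue/' + edition.lower()
print(url)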
@@ -53,7 +53,7 @@ class PsychologyToday(BasicNewsRecipe):
        a = soup.find(**classes('magazine-thumbnail')).a
        url = a['href']
        past_edition = self.recipe_specific_options.get('date')
-        if past_edition:
+        if past_edition and isinstance(past_edition, str):
            url = '/us/magazine/archive/' + past_edition
        soup = self.index_to_soup(absurl(url))
        cov = soup.find(**classes('content-header--cover-image'))
@@ -21,7 +21,7 @@ class Reuters(BasicNewsRecipe):
    masthead_url = 'https://www.reutersprofessional.com/wp-content/uploads/2024/03/primary-logo.svg'
    language = 'en'
    encoding = 'utf-8'
-    oldest_article = 2 # days
+    oldest_article = 1.2 # days
    no_javascript = True
    no_stylesheets = True
    remove_attributes = ['style', 'height', 'width']
@@ -30,10 +30,24 @@ class Reuters(BasicNewsRecipe):

    extra_css = '''
        .label, .auth { font-size:small; color:#202020; }
-        .figc { font-size:small; text-align:center; }
+        .figc { font-size:small; }
        img {display:block; margin:0 auto;}
    '''

+    recipe_specific_options = {
+        'days': {
+            'short': 'Oldest article to download from this news source. In days ',
+            'long': 'For example, 0.5, gives you articles from the past 12 hours',
+            'default': str(oldest_article)
+        }
+    }
+
+    def __init__(self, *args, **kwargs):
+        BasicNewsRecipe.__init__(self, *args, **kwargs)
+        d = self.recipe_specific_options.get('days')
+        if d and isinstance(d, str):
+            self.oldest_article = float(d)
+
    def parse_index(self):
        index = 'https://www.reuters.com'
        today = datetime.now()
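Reuters and the Lex Fridman recipe both apply the 'days' option in __init__ rather than in parse_index, so the adjusted oldest_article is in place before any article-age filtering happens. The assembled pattern, as a sketch (the class name and title are placeholders):

from calibre.web.feeds.news import BasicNewsRecipe


class example(BasicNewsRecipe):
    title = 'Example'
    oldest_article = 1.2  # days; also the advertised option default

    recipe_specific_options = {
        'days': {
            'short': 'Oldest article to download from this news source. In days ',
            'long': 'For example, 0.5, gives you articles from the past 12 hours',
            'default': str(oldest_article)
        }
    }

    def __init__(self, *args, **kwargs):
        BasicNewsRecipe.__init__(self, *args, **kwargs)
        d = self.recipe_specific_options.get('days')
        if d and isinstance(d, str):
            # option values arrive as strings; convert before use
            self.oldest_article = float(d)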
@@ -48,9 +48,21 @@ class tls(BasicNewsRecipe):
        .det { font-size:small; color:#202020; font-weight:bold; }
    '''

+    recipe_specific_options = {
+        'date': {
+            'short': 'The date of the edition to download\nlower case Month-DD-YYYY format',
+            'long': 'For example, july-12-2024',
+            'default': 'current-issue'
+        }
+    }
+
    def parse_index(self):
-        # for past edition, change the issue link below
        issue = 'https://www.the-tls.co.uk/issues/current-issue/'
+
+        d = self.recipe_specific_options.get('date')
+        if d and isinstance(d, str):
+            issue = 'https://www.the-tls.co.uk/issues/' + d + '/'
+
        url = 'https://www.the-tls.co.uk/wp-json/tls/v2/contents-page/' + get_id(issue)
        raw = self.index_to_soup(url, raw=True)
        data = json.loads(raw)
@@ -47,13 +47,21 @@ class worldarch(BasicNewsRecipe):
            exp.name = 'p'
        return soup

+    recipe_specific_options = {
+        'issue': {
+            'short': 'Enter the Issue Number you want to download ',
+            'long': 'For example, 136'
+        }
+    }
+
    def parse_index(self):
        soup = self.index_to_soup('https://the-past.com/category/magazines/cwa/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']

-        # for past editions, add url
-        # url = ''
+        d = self.recipe_specific_options.get('issue')
+        if d and isinstance(d, str):
+            url = 'https://the-past.com/magazines/current-world-archaeology-' + d + '/'

        issue = self.index_to_soup(url)
        ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
@@ -107,7 +107,8 @@ class WSJ(BasicNewsRecipe):
        return soup

    def _download_cover(self):
-        if not self.recipe_specific_options.get('date'):
+        d = self.recipe_specific_options.get('date')
+        if not (d and isinstance(d, str)):
            import os
            from contextlib import closing

@@ -143,7 +144,7 @@ class WSJ(BasicNewsRecipe):
        past_edition = self.recipe_specific_options.get('date')

        for itm in catalog['items']:
-            if past_edition:
+            if past_edition and isinstance(past_edition, str):
                if itm['key'] == 'ITPNEXTGEN' + past_edition:
                    key = itm['key']
                    manifest = itm['manifest']
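For WSJ, the _download_cover guard is inverted (fetch the generic cover only when no date was requested) and the catalog scan keys off 'ITPNEXTGEN' + past_edition. A sketch of that key matching, with an assumed item shape and a hypothetical edition id:

def find_manifest(catalog, past_edition):
    # Assumed shape: {'items': [{'key': ..., 'manifest': ...}, ...]}
    for itm in catalog['items']:
        if past_edition and isinstance(past_edition, str):
            if itm['key'] == 'ITPNEXTGEN' + past_edition:
                return itm['manifest']
    return None


catalog = {'items': [{'key': 'ITPNEXTGEN20240809', 'manifest': 'manifest-url'}]}
print(find_manifest(catalog, '20240809'))  # hypothetical edition id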
|
Loading…
x
Reference in New Issue
Block a user