mirror of https://github.com/kovidgoyal/calibre.git
Merge branch 'master' of https://github.com/unkn0w7n/calibre
commit 4232224213
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+# vim:fileencoding=utf-8
 from datetime import datetime, timedelta, timezone
 
 from calibre.utils.date import parse_date
@@ -21,6 +23,11 @@ class mains(BasicNewsRecipe):
     use_embedded_content = False
     oldest_article = 30  # days
     masthead_url = 'https://www.theindiaforum.in/themes/the_india_forum/images/tif_logo.png'
+    extra_css = '''
+        [class*="caption"], [class*="references"], #article-author-top-container { font-size:small; }
+        [class*="blurb"] { font-style:italic; }
+        blockquote, em { color:#202020; }
+    '''
 
     keep_only_tags = [
         classes('article-lead-container block-views-blockarticle-block-1'),
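Note: the datetime/timedelta/timezone and parse_date imports added at the top of this recipe pair with the oldest_article = 30 setting shown above. A minimal sketch of the usual cutoff pattern, assuming calibre's Python environment; the is_stale helper and the sample date are illustrative, not part of this commit:

from datetime import datetime, timedelta, timezone

from calibre.utils.date import parse_date

oldest_article = 30  # days, matching the recipe setting above


def is_stale(date_string):
    # parse_date returns a timezone-aware datetime (UTC by default), so it can
    # be compared directly against an aware "now" in UTC
    cutoff = datetime.now(timezone.utc) - timedelta(days=oldest_article)
    return parse_date(date_string) < cutoff


print(is_stale('2024-05-13T10:00:00+05:30'))  # True once the article is older than 30 days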
@@ -28,6 +35,12 @@ class mains(BasicNewsRecipe):
         classes('block-field-blocknodearticlebody block-field-blocknodearticlefield-references')
     ]
 
+    remove_tags = [
+        dict(name=['source', 'svg']),
+        dict(attrs={'src':lambda x: x and x.endswith('quote_logo.png')}),
+        classes('s_info')
+    ]
+
     def parse_index(self):
         soup = self.index_to_soup('https://www.theindiaforum.in/')
         ul = soup.find('ul', attrs={'class':'float-left'})
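Note: keep_only_tags and remove_tags above use the classes() helper imported from calibre.web.feeds.news in these recipes; classes('a b') keeps any tag whose class attribute shares at least one name with {'a', 'b'}. A rough standalone sketch of that matching idea, not the library's actual code:

def matches_classes(tag_class_attr, names):
    # tag_class_attr: the tag's class attribute as a space-separated string
    # names: the space-separated class names passed to classes()
    wanted = frozenset(names.split())
    return bool(tag_class_attr) and bool(frozenset(tag_class_attr.split()) & wanted)


print(matches_classes('article-lead-container extra', 'article-lead-container block-views-blockarticle-block-1'))  # True
print(matches_classes('s_info', 'article-lead-container'))  # False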
@@ -9,21 +9,6 @@ from calibre.ebooks.BeautifulSoup import BeautifulSoup
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
-def media_bucket(x):
-    if x.get('type', '') == 'image':
-        if x.get('subtype', '') == 'graphic' or 'images.wsj.net' not in x['manifest-url']:
-            return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
-                x['manifest-url'], x['caption'] + '<i> ' + x['credit'] + '</i>'
-            )
-        return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
-            x['manifest-url'].split('?')[0] + '?width=600', x['caption'] + '<i> ' + x['credit'] + '</i>'
-        )
-    if x.get('type', '') == 'video':
-        return '<br><a href="{}"><img src="{}"></a><div class="figc">{}</div>\n'.format(
-            x['share_link'], x['thumbnail_url'].split('?')[0] + '?width=600', x['caption'] + '<i> ' + x['credit'] + '</i>'
-        )
-    return
-
 class WSJ(BasicNewsRecipe):
     title = 'The Wall Street Journal'
     __author__ = 'unkn0wn'
@@ -43,6 +28,11 @@ class WSJ(BasicNewsRecipe):
         'date': {
             'short': 'The date of the edition to download (YYYYMMDD format)\nOnly the past 6 editions will be available ',
             'long': 'For example, 20240513'
+        },
+        'res': {
+            'short': 'For hi-res images, select a resolution from the\nfollowing options: 800, 1000, 1200 or 1500',
+            'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use 400 or 300.',
+            'default': '600'
         }
     }
 
@@ -69,6 +59,25 @@ class WSJ(BasicNewsRecipe):
         dict(name='p', attrs={'id':'orig-pubdate-string'})
     ]
 
+    def media_bucket(self, x):
+        res = '?width=600'
+        w = self.recipe_specific_options.get('res')
+        if w and isinstance(w, str):
+            res = '?width=' + w
+        if x.get('type', '') == 'image':
+            if x.get('subtype', '') == 'graphic' or 'images.wsj.net' not in x['manifest-url']:
+                return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
+                    x['manifest-url'], x['caption'] + '<i> ' + x['credit'] + '</i>'
+                )
+            return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
+                x['manifest-url'].split('?')[0] + res, x['caption'] + '<i> ' + x['credit'] + '</i>'
+            )
+        if x.get('type', '') == 'video':
+            return '<br><a href="{}"><img src="{}"></a><div class="figc">{}</div>\n'.format(
+                x['share_link'], x['thumbnail_url'].split('?')[0] + res, x['caption'] + '<i> ' + x['credit'] + '</i>'
+            )
+        return
+
     def preprocess_html(self, soup):
         jpml = soup.find('jpml')
         if jpml:
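Note: the new media_bucket method above defaults image URLs to ?width=600 but lets the user-supplied 'res' option override that width. A standalone sketch of just that URL rewrite, assuming the same manifest-url shape the recipe handles; the rewrite_width name and sample URL are illustrative:

def rewrite_width(manifest_url, res=None):
    # Drop any existing query string and append the requested width,
    # falling back to 600 when no 'res' option was supplied.
    width = '?width=' + res if res and isinstance(res, str) else '?width=600'
    return manifest_url.split('?')[0] + width


print(rewrite_width('https://images.wsj.net/im-12345?size=1.5'))          # ...im-12345?width=600
print(rewrite_width('https://images.wsj.net/im-12345?size=1.5', '1200'))  # ...im-12345?width=1200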
@@ -81,8 +90,9 @@ class WSJ(BasicNewsRecipe):
         dt = soup.find('p', attrs={'id':'orig-pubdate-string'})
         read = soup.find('p', attrs={'id':'time-to-read'})
         byl = soup.find('p', attrs={'id':'byline'})
-        if dt and byl and read:
-            dt.name = read.name = byl.name = 'div'
+        fl = soup.find('p', attrs={'id':'flashline'})
+        if dt and byl and read and fl:
+            dt.name = read.name = byl.name = fl.name = 'div'
             byl.insert(0, dt)
             byl.insert(0, read)
         url = soup.find('p', attrs={'id':'share-link'})
@@ -96,7 +106,7 @@ class WSJ(BasicNewsRecipe):
         if buck:
             data = json.loads(buck.string)
             buck.extract()
-            i_lst = [media_bucket(x) for x in data['items']]
+            i_lst = [self.media_bucket(x) for x in data['items']]
             m_itm = soup.findAll('panel', attrs={'class':'media-item'})
             if i_lst and m_itm:
                 for x, y in list(zip_longest(m_itm, i_lst)):
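Note: in the hunk above, preprocess_html now calls self.media_bucket and pairs the rendered HTML with the <panel class="media-item"> placeholders via zip_longest, which tolerates the two lists having different lengths. A small sketch of that pairing behaviour; the sample lists are made up:

from itertools import zip_longest

panels = ['panel-1', 'panel-2', 'panel-3']       # placeholder tags found in the page
rendered = ['<img src="a">', '<img src="b">']    # HTML strings returned by media_bucket

for panel, markup in zip_longest(panels, rendered):
    # the shorter list is padded with None, so the extra panel simply gets no markup
    print(panel, markup)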
@@ -9,21 +9,6 @@ from calibre.ebooks.BeautifulSoup import BeautifulSoup
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
-def media_bucket(x):
-    if x.get('type', '') == 'image':
-        if x.get('subtype', '') == 'graphic':
-            return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
-                x['manifest-url'], x['caption'] + '<i> ' + x['credit'] + '</i>'
-            )
-        return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
-            x['manifest-url'].split('?')[0] + '?width=600', x['caption'] + '<i> ' + x['credit'] + '</i>'
-        )
-    if x.get('type', '') == 'video':
-        return '<br><a href="{}"><img src="{}"></a><div class="figc">{}</div>\n'.format(
-            x['share_link'], x['thumbnail_url'].split('?')[0] + '?width=600', x['caption'] + '<i> ' + x['credit'] + '</i>'
-        )
-    return
-
 class WSJ(BasicNewsRecipe):
     title = 'WSJ. Magazine'
     __author__ = 'unkn0wn'
@@ -39,6 +24,14 @@ class WSJ(BasicNewsRecipe):
     remove_attributes = ['style', 'height', 'width']
     resolve_internal_links = True
 
+    recipe_specific_options = {
+        'res': {
+            'short': 'For hi-res images, select a resolution from the\nfollowing options: 800, 1000, 1200 or 1500',
+            'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use 400 or 300.',
+            'default': '600'
+        }
+    }
+
     extra_css = '''
         #subhed, em { font-style:italic; color:#202020; }
         #byline, #time-to-read, #orig-pubdate-string, .article-byline, time, #flashline { font-size:small; }
@@ -62,6 +55,25 @@ class WSJ(BasicNewsRecipe):
         dict(name='p', attrs={'id':'orig-pubdate-string'})
     ]
 
+    def media_bucket(self, x):
+        res = '?width=600'
+        w = self.recipe_specific_options.get('res')
+        if w and isinstance(w, str):
+            res = '?width=' + w
+        if x.get('type', '') == 'image':
+            if x.get('subtype', '') == 'graphic' or 'images.wsj.net' not in x['manifest-url']:
+                return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
+                    x['manifest-url'], x['caption'] + '<i> ' + x['credit'] + '</i>'
+                )
+            return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
+                x['manifest-url'].split('?')[0] + res, x['caption'] + '<i> ' + x['credit'] + '</i>'
+            )
+        if x.get('type', '') == 'video':
+            return '<br><a href="{}"><img src="{}"></a><div class="figc">{}</div>\n'.format(
+                x['share_link'], x['thumbnail_url'].split('?')[0] + res, x['caption'] + '<i> ' + x['credit'] + '</i>'
+            )
+        return
+
     def preprocess_html(self, soup):
         jpml = soup.find('jpml')
         if jpml:
@@ -74,8 +86,9 @@ class WSJ(BasicNewsRecipe):
         dt = soup.find('p', attrs={'id':'orig-pubdate-string'})
         read = soup.find('p', attrs={'id':'time-to-read'})
         byl = soup.find('p', attrs={'id':'byline'})
-        if dt and byl and read:
-            dt.name = read.name = byl.name = 'div'
+        fl = soup.find('p', attrs={'id':'flashline'})
+        if dt and byl and read and fl:
+            dt.name = read.name = byl.name = fl.name = 'div'
             byl.insert(0, dt)
             byl.insert(0, read)
         url = soup.find('p', attrs={'id':'share-link'})
@@ -88,7 +101,7 @@ class WSJ(BasicNewsRecipe):
         if buck:
             data = json.loads(buck.string)
             buck.extract()
-            i_lst = [media_bucket(x) for x in data['items']]
+            i_lst = [self.media_bucket(x) for x in data['items']]
             m_itm = soup.findAll('panel', attrs={'class':'media-item'})
             if i_lst and m_itm:
                 for x, y in list(zip_longest(m_itm, i_lst)):
@@ -9,21 +9,6 @@ from calibre.ebooks.BeautifulSoup import BeautifulSoup
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
-def media_bucket(x):
-    if x.get('type', '') == 'image':
-        if x.get('subtype', '') == 'graphic' or 'images.wsj.net' not in x['manifest-url']:
-            return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
-                x['manifest-url'], x['caption'] + '<i> ' + x['credit'] + '</i>'
-            )
-        return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
-            x['manifest-url'].split('?')[0] + '?width=600', x['caption'] + '<i> ' + x['credit'] + '</i>'
-        )
-    if x.get('type', '') == 'video':
-        return '<br><a href="{}"><img src="{}"></a><div class="figc">{}</div>\n'.format(
-            x['share_link'], x['thumbnail_url'].split('?')[0] + '?width=600', x['caption'] + '<i> ' + x['credit'] + '</i>'
-        )
-    return
-
 class WSJ(BasicNewsRecipe):
     title = 'WSJ News'
     __author__ = 'unkn0wn'
@@ -47,6 +32,11 @@ class WSJ(BasicNewsRecipe):
             'short': 'Oldest article to download from this news source. In days ',
             'long': 'For example, 0.5, gives you articles from the past 12 hours',
             'default': str(oldest_article)
+        },
+        'res': {
+            'short': 'For hi-res images, select a resolution from the\nfollowing options: 800, 1000, 1200 or 1500',
+            'long': 'This is useful for non e-ink devices, and for a lower file size\nthan the default, use 400 or 300.',
+            'default': '600'
         }
     }
 
@@ -79,6 +69,25 @@ class WSJ(BasicNewsRecipe):
         dict(name='p', attrs={'id':'orig-pubdate-string'})
     ]
 
+    def media_bucket(self, x):
+        res = '?width=600'
+        w = self.recipe_specific_options.get('res')
+        if w and isinstance(w, str):
+            res = '?width=' + w
+        if x.get('type', '') == 'image':
+            if x.get('subtype', '') == 'graphic' or 'images.wsj.net' not in x['manifest-url']:
+                return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
+                    x['manifest-url'], x['caption'] + '<i> ' + x['credit'] + '</i>'
+                )
+            return '<br><img src="{}"><div class="figc">{}</div>\n'.format(
+                x['manifest-url'].split('?')[0] + res, x['caption'] + '<i> ' + x['credit'] + '</i>'
+            )
+        if x.get('type', '') == 'video':
+            return '<br><a href="{}"><img src="{}"></a><div class="figc">{}</div>\n'.format(
+                x['share_link'], x['thumbnail_url'].split('?')[0] + res, x['caption'] + '<i> ' + x['credit'] + '</i>'
+            )
+        return
+
     def preprocess_html(self, soup):
         jpml = soup.find('jpml')
         if jpml:
@@ -91,8 +100,9 @@ class WSJ(BasicNewsRecipe):
         dt = soup.find('p', attrs={'id':'orig-pubdate-string'})
         read = soup.find('p', attrs={'id':'time-to-read'})
         byl = soup.find('p', attrs={'id':'byline'})
-        if dt and byl and read:
-            dt.name = read.name = byl.name = 'div'
+        fl = soup.find('p', attrs={'id':'flashline'})
+        if dt and byl and read and fl:
+            dt.name = read.name = byl.name = fl.name = 'div'
             byl.insert(0, dt)
             byl.insert(0, read)
         url = soup.find('p', attrs={'id':'share-link'})
@@ -106,7 +116,7 @@ class WSJ(BasicNewsRecipe):
         if buck:
             data = json.loads(buck.string)
             buck.extract()
-            i_lst = [media_bucket(x) for x in data['items']]
+            i_lst = [self.media_bucket(x) for x in data['items']]
             m_itm = soup.findAll('panel', attrs={'class':'media-item'})
             if i_lst and m_itm:
                 for x, y in list(zip_longest(m_itm, i_lst)):