Mirror of https://github.com/kovidgoyal/calibre.git, synced 2025-07-09 03:04:10 -04:00
Merge branch 'master' of https://github.com/unkn0w7n/calibre
commit 781bb771f4
BIN recipes/icons/politico_eu.png (new file, 464 B; binary file not shown)
@@ -103,7 +103,7 @@ class LiveMint(BasicNewsRecipe):
         dict(name=['meta', 'link', 'svg', 'button', 'iframe']),
         classes(
             'trendingSimilarHeight moreNews mobAppDownload label msgError msgOk taboolaHeight gadgetSlider'
-            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo'
+            ' socialHolder imgbig disclamerText disqus-comment-count openinApp2 lastAdSlot bs_logo manualbacklink'
             ' datePublish sepStory premiumSlider moreStory Joinus moreAbout milestone benefitText'
         )
     ]
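The classes() call in the hunk above is, in other calibre recipes, the helper imported from calibre.web.feeds.news that turns a space-separated list of CSS class names into a remove_tags matcher; a minimal sketch under that assumption (the Example class and its class list are hypothetical):

from calibre.web.feeds.news import BasicNewsRecipe, classes


class Example(BasicNewsRecipe):
    title = 'Example'
    # classes() builds an attrs matcher that hits any tag carrying one of the
    # listed class names, so appending 'manualbacklink' to the existing string
    # is all the commit needs in order to strip those links as well.
    remove_tags = [
        classes('moreNews mobAppDownload manualbacklink'),
    ]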
@@ -132,7 +132,9 @@ class LiveMint(BasicNewsRecipe):
         # remove empty p tags
         raw = re.sub(
             r'(<p>\s*)(<[^(\/|a|i|b|em|strong)])', '\g<2>', re.sub(
-                r'(<p>\s*&nbsp;\s*<\/p>)|(<p>\s*<\/p>)|(<p\s*\S+>&nbsp;\s*<\/p>)', '', raw
+                r'(<p>\s*&nbsp;\s*<\/p>)|(<p>\s*<\/p>)|(<p\s*\S+>&nbsp;\s*<\/p>)', '', re.sub(
+                    r'(?=<h2>\s*Also\s*Read).*?(?<=</h2>)', '', raw
+                )
             )
         )
         if '<script>var wsjFlag=true;</script>' in raw:
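A standalone sketch of what the nested re.sub chain above does, applied innermost call first to a small hypothetical snippet (the sample string is illustrative, not taken from Mint pages):

import re

raw = '<h2> Also Read: more stories</h2><p>&nbsp; </p><p></p><p>Actual article text.</p>'

# innermost call (the one added in this commit): drop 'Also Read' teaser headings
raw = re.sub(r'(?=<h2>\s*Also\s*Read).*?(?<=</h2>)', '', raw)
# middle call: drop empty <p> tags, including ones that hold only &nbsp;
raw = re.sub(r'(<p>\s*&nbsp;\s*<\/p>)|(<p>\s*<\/p>)|(<p\s*\S+>&nbsp;\s*<\/p>)', '', raw)
# outermost call: unwrap <p> tags that directly wrap another block-level tag
raw = re.sub(r'(<p>\s*)(<[^(\/|a|i|b|em|strong)])', r'\g<2>', raw)

print(raw)  # -> <p>Actual article text.</p>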
@@ -35,7 +35,8 @@ def parse_lead_image(media):
     yield '<p>'
     if 'dsc' in media['image']:
         yield '<div><img src="{}" alt="{}"></div>'.format(
-            escape(media['image']['src'], True), escape(media['image']['dsc'], True))
+            escape(media['image']['src'], True), escape(media['image']['dsc'], True)
+        )
     else:
         yield '<div><img src="{}"></div>'.format(escape(media['image']['src'], True))
     if 'caption' in media and 'credit' in media:
@@ -84,7 +85,7 @@ def parse_body(x):
         if tag == 'inline':
             yield ''.join(parse_inline(x))
         elif 'attrs' in x and 'href' in x.get('attrs', ''):
-            yield '<' + tag + ' href = "{}">'.format(x['attrs']['href'])
+            yield '<' + tag + ' href="{}">'.format(x['attrs']['href'])
             for yld in parse_cont(x):
                 yield yld
             yield '</' + tag + '>'
@@ -120,8 +121,7 @@ def parse_article(edg):
     for item in main['props']['body']:
         if isinstance(item, dict):
             if item.get('type', '') == 'inline':
-                for inl in parse_inline(item):
-                    yield inl
+                yield ''.join(parse_inline(item))
         elif isinstance(item, list):
             for line in item:
                 yield ''.join(parse_body(line))
@@ -34,7 +34,8 @@ def parse_lead_image(media):
     yield '<p>'
     if 'dsc' in media['image']:
         yield '<div><img src="{}" alt="{}"></div>'.format(
-            escape(media['image']['src'], True), escape(media['image']['dsc'], True))
+            escape(media['image']['src'], True), escape(media['image']['dsc'], True)
+        )
     else:
         yield '<div><img src="{}"></div>'.format(escape(media['image']['src'], True))
     if 'caption' in media and 'credit' in media:
@@ -83,7 +84,7 @@ def parse_body(x):
         if tag == 'inline':
             yield ''.join(parse_inline(x))
         elif 'attrs' in x and 'href' in x.get('attrs', ''):
-            yield '<' + tag + ' href = "{}">'.format(x['attrs']['href'])
+            yield '<' + tag + ' href="{}">'.format(x['attrs']['href'])
             for yld in parse_cont(x):
                 yield yld
             yield '</' + tag + '>'
@@ -119,8 +120,7 @@ def parse_article(edg):
     for item in main['props']['body']:
         if isinstance(item, dict):
             if item.get('type', '') == 'inline':
-                for inl in parse_inline(item):
-                    yield inl
+                yield ''.join(parse_inline(item))
         elif isinstance(item, list):
             for line in item:
                 yield ''.join(parse_body(line))
@@ -39,7 +39,8 @@ def parse_lead_image(media):
     yield '<p>'
     if 'dsc' in media['image']:
         yield '<div><img src="{}" alt="{}"></div>'.format(
-            escape(media['image']['src'], True), escape(media['image']['dsc'], True))
+            escape(media['image']['src'], True), escape(media['image']['dsc'], True)
+        )
     else:
         yield '<div><img src="{}"></div>'.format(escape(media['image']['src'], True))
     if 'caption' in media and 'credit' in media:
@@ -88,7 +89,7 @@ def parse_body(x):
         if tag == 'inline':
             yield ''.join(parse_inline(x))
         elif 'attrs' in x and 'href' in x.get('attrs', ''):
-            yield '<' + tag + ' href = "{}">'.format(x['attrs']['href'])
+            yield '<' + tag + ' href="{}">'.format(x['attrs']['href'])
             for yld in parse_cont(x):
                 yield yld
             yield '</' + tag + '>'
@@ -124,8 +125,7 @@ def parse_article(edg):
     for item in main['props']['body']:
         if isinstance(item, dict):
             if item.get('type', '') == 'inline':
-                for inl in parse_inline(item):
-                    yield inl
+                yield ''.join(parse_inline(item))
         elif isinstance(item, list):
             for line in item:
                 yield ''.join(parse_body(line))
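The parse_article change is identical in the three hunks above; a minimal standalone sketch (parse_inline_stub and the item dict are hypothetical stand-ins for the recipes' own parse_inline and JSON data) of what it means for callers: the fragments are now joined and yielded as one HTML chunk instead of being re-yielded one by one.

def parse_inline_stub(item):
    # stand-in for the recipes' parse_inline(); yields HTML fragments one at a time
    yield '<p>'
    yield item.get('text', '')
    yield '</p>'


item = {'type': 'inline', 'text': 'hello'}

old = list(parse_inline_stub(item))     # before: ['<p>', 'hello', '</p>'] re-yielded individually
new = ''.join(parse_inline_stub(item))  # after: a single '<p>hello</p>' yielded once
print(old, new)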
recipes/politico_eu.recipe (new file, 101 lines)
@@ -0,0 +1,101 @@
#!/usr/bin/env python2
# -*- coding: cp1252 -*-

__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
'''
politico.eu
'''

import re

from calibre.web.feeds.news import BasicNewsRecipe


class Politico(BasicNewsRecipe):

    title = 'Politico.eu'
    __author__ = 'unkn0wn, Darko Miletic and Sujata Raman'
    description = 'We connect and empower professionals through nonpartisan journalism and actionable intelligence about European politics and policy. Download Weekly.'
    publisher = 'Axel Springer SE.'
    category = 'news, politics, Europe'
    oldest_article = 7  # days
    max_articles_per_feed = 20
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True
    encoding = 'UTF-8'
    language = 'en'

    remove_empty_feeds = True
    ignore_duplicate_articles = ['url']

    html2lrf_options = [
        '--comment', description, '--category', category, '--publisher', publisher, '--ignore-tables'
    ]

    html2epub_options = 'publisher="' + publisher + '"\ncomments="' + \
        description + '"\ntags="' + category + '"\nlinearize_tables=True'

    keep_only_tags = [
        dict(name='div', attrs={'class':['container']}),
    ]

    remove_tags = [
        dict(name=['notags', 'embed', 'aside', 'object', 'link']),
        dict(name='div', attrs={'class':'module-article-inline related-articles'}),
        dict(name='header', attrs={'class':'summary-header'}),
        dict(name='div', attrs={'class':'amazon-polly-wrapper'}),
        dict(name='div', attrs={'class': lambda x: x and 'story-meta' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-tools' in x.split()}),
        dict(attrs={'class': lambda x: x and 'social-sharing' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-meta__authors' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-meta__timestamp' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-continued' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-supplement' in x.split()}),
        dict(attrs={'class': lambda x: x and 'story-share' in x.split()}),
        dict(attrs={'class': lambda x: x and 'suggested' in x.split()}),
        dict(attrs={'class': lambda x: x and 'article__sidebar' in x.split()}),
        dict(name='svg'),
        dict(name='footer'),
        dict(name='span', attrs={'class':'ad-label'}),
        dict(name='div', attrs={'class':'pro-pill'}),
        dict(name='ol', attrs={'class':'breadcrumbs__list'}),
    ]

    preprocess_regexps = [(re.compile(r'<a\s[^>]*>([^<]*)</a>', re.DOTALL|re.IGNORECASE), lambda match: match.group(1))]

    remove_tags_after = dict(attrs={'class': lambda x: x and 'article__more-from' in x.split()})

    extra_css = '''
        body{font-family:Arial,Sans-serif;}
        element.style{color:#FF0000;font-family:Arial,Sans-serif;}
        .author{color:#808080;font-size:x-small;}
        a{ color:#003399;}
        .byline{color:#696969 ; font-size:x-small;}
        .story{color:#000000;}
        td{color:#000000;}
        .figcaption__inner, .article-meta, .authors, .datetime {font-size:small; }
    '''

    def get_article_url(self, article):
        url = BasicNewsRecipe.get_article_url(self, article)
        if 'politico.com' not in url:
            return url.split('?')[0]

    masthead_url = 'https://www.politico.eu/cdn-cgi/image/width=573,quality=80,onerror=redirect,format=auto/wp-content/uploads/2021/02/25/Politico_logo-01.png'

    def get_cover_url(self):
        soup = self.index_to_soup('https://www.politico.eu/')
        for cov in soup.findAll(attrs={'class':'cta__image'}):
            if cov.find('a', attrs={'href': lambda x: x and 'edition.pagesuite-professional' in x}):
                return cov.a.img['src']

    feeds = [
        ('Policy', 'https://www.politico.eu/section/policy/feed'),
        ('Opinion', 'https://www.politico.eu/section/opinion/feed'),
        ('Newsletter', 'https://www.politico.eu/newsletter/feed'),
        ('Others', 'https://www.politico.eu/feed')
    ]

    def postprocess_html(self, soup, first):
        for tag in soup.findAll(name=['table', 'tr', 'td']):
            tag.name = 'div'
        return soup
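The recipe's single preprocess_regexps entry flattens links before the HTML is parsed; a standalone sketch of that substitution on a hypothetical snippet:

import re

pattern = re.compile(r'<a\s[^>]*>([^<]*)</a>', re.DOTALL | re.IGNORECASE)


def unwrap(match):
    # keep only the anchor text, dropping the tag and its attributes
    return match.group(1)


sample = '<p>Read the <a href="https://www.politico.eu/article/example/">full story</a> here.</p>'
print(pattern.sub(unwrap, sample))  # -> <p>Read the full story here.</p>

In a calibre checkout the recipe can be exercised with something like ebook-convert politico_eu.recipe .epub --test.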