#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

'''
www.guardian.co.uk
'''

from datetime import date

from calibre import random_user_agent
from calibre.web.feeds.news import BasicNewsRecipe
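

# Build a BeautifulSoup attrs matcher that matches any tag whose class
# attribute shares at least one name with the given space-separated list.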
def classes(classes):
    q = frozenset(classes.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})


class Guardian(BasicNewsRecipe):

    title = u'The Guardian and The Observer'
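    # The Observer is the Guardian's Sunday edition, so when the recipe runs
    # on a Sunday (weekday() == 6) it switches to the Observer front page.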
    is_observer = False
    base_url = "https://www.theguardian.com/uk"
    if date.today().weekday() == 6:
        is_observer = True
        base_url = "https://www.theguardian.com/observer"

    __author__ = 'Kovid Goyal'
    language = 'en_GB'

    oldest_article = 7
    max_articles_per_feed = 100
    remove_javascript = True
    encoding = 'utf-8'
    remove_empty_feeds = True
    no_stylesheets = True
    remove_attributes = ['style', 'width', 'height']
    ignore_duplicate_articles = {'title', 'url'}

    timefmt = ' [%a, %d %b %Y]'
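
    # Strip social sharing widgets, navigation, ads, tracking pixels and other
    # page furniture that is not part of the article body.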
    remove_tags = [
        dict(attrs={'class': lambda x: x and '--twitter' in x}),
        dict(attrs={'class': lambda x: x and 'submeta' in x.split()}),
        dict(attrs={'data-component': ['share', 'social', 'nav', 'nav2']}),
        dict(attrs={'data-link-name': 'block share'}),
        dict(attrs={'data-print-layout': 'hide'}),
        dict(id=['dfp-ad--survey', 'sub-nav-root', 'the-caption', 'bannerandheader']),
        {'for': 'the-checkbox'},
        dict(href=['#maincontent', '#navigation']),
        dict(role=['navigation', 'button']),
        dict(attrs={'class': lambda x: x and 'inline-expand-image' in x}),
        dict(name='a', attrs={'aria-label': lambda x: x and 'Share On' in x}),
        dict(name='a', attrs={'class': lambda x: x and 'social__action js-social__action--top' in x}),
        dict(name='div', attrs={'id': 'share-count-root'}),
        dict(attrs={'class': lambda x: x and 'modern-visible' in x.split()}),
        classes('badge-slot reveal-caption__checkbox mobile-only element-rich-link'),
        dict(name=['link', 'meta', 'style', 'svg', 'input', 'source', 'noscript', 'button']),
        dict(name='img', src=lambda x: x and 'https://sb.scorecardresearch.com/' in x),
    ]
    remove_tags_after = [
        classes('content__article-body js-bottom-marker article-body-commercial-selector'),
    ]
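
    # Constrain images to the page, colour link text and de-emphasise
    # figure captions.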
    extra_css = """
        img {
            max-width: 100% !important;
            max-height: 100% !important;
        }

        a span {
            color: #E05E02;
        }

        figcaption span {
            font-size: 0.5em;
            color: #6B6B6B;
        }
    """

    def get_browser(self, *a, **kw):
        # This site returns images in JPEG-XR format if the user agent is IE
        if not hasattr(self, 'non_ie_ua'):
            try:
                self.non_ie_ua = random_user_agent(allow_ie=False)
            except TypeError:
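                # Older random_user_agent() implementations may not accept
                # allow_ie; fall back to a fixed, non-IE Chrome user agent.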
                self.non_ie_ua = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.111 Safari/537.36'
        kw['user_agent'] = self.non_ie_ua
        br = BasicNewsRecipe.get_browser(self, *a, **kw)
        return br

    def parse_section(self, section_url):
        soup = self.index_to_soup(section_url)
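        # Each <section> on the landing page becomes a feed: its <h2> text is
        # the feed title and the aria-labelled links in its <li> items are the
        # articles.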
        for section in soup.findAll('section'):
            articles = []
            title = self.tag_to_string(section.find('h2'))
            if not title:
                continue
            self.log('Found section:', title)
            for li in section.findAll('li'):
                a = li.find('a', attrs={'href': True, 'aria-label': True})
                if a:
                    url = a['href']
                    if url.startswith('/'):
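                        # Site-relative link: prefix the scheme and host from
                        # base_url (everything before its last path component).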
                        url = self.base_url.rpartition('/')[0] + url
                    self.log('\t', a['aria-label'], url)
                    articles.append({'title': a['aria-label'], 'url': url})
            if articles:
                yield title, articles

    def parse_index(self):
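        # The index is the front page (base_url, which is the Observer front
        # page on Sundays) plus the UK sport section.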
        feeds = list(self.parse_section(self.base_url))
        feeds += list(self.parse_section('https://www.theguardian.com/uk/sport'))
        return feeds

    def preprocess_html(self, soup):
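        # Very large tables (more than 20 rows) render poorly on e-reader
        # screens, so drop them from the article.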
        for table in soup.findAll('table'):
            if len(table.findAll('tr')) > 20:
                table.decompose()
        return soup