Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit 7e5f328fc3: Merge branch 'master' of https://github.com/unkn0w7n/calibre

The commit touches two recipes: The New Yorker and Outlook India.
The New Yorker recipe:

@@ -18,9 +18,7 @@ class NewYorker(BasicNewsRecipe):
 
     title = "The New Yorker Magazine"
     description = "Articles of the week's New Yorker magazine"
-
-    url_list = []
-    language = 'en'
+    language = 'en_US'
     __author__ = 'Kovid Goyal'
     no_stylesheets = True
     timefmt = ' [%b %d]'
@@ -49,15 +47,6 @@ class NewYorker(BasicNewsRecipe):
     ]
     remove_attributes = ['style']
 
-    def __init__(self, *args, **kwargs):
-        BasicNewsRecipe.__init__(self, *args, **kwargs)
-        if self.output_profile.short_name.startswith('kindle'):
-            # Reduce image sizes to get file size below amazon's email
-            # sending threshold
-            self.web2disk_options.compress_news_images = True
-            self.web2disk_options.compress_news_images_auto_size = 5
-            self.log.warn('Kindle Output profile being used, reducing image quality to keep file size below amazon email threshold')
-
     def preprocess_html(self, soup):
         w = '/w_320'  # use '/w_640' for highres
         for img in soup.findAll('img'):
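The surviving `preprocess_html` context picks a CDN width segment (`/w_320`, or `/w_640` for high resolution) and loops over the page's images. The body of that loop is not shown in this hunk; a minimal sketch of what it presumably does, assuming the magazine's image CDN encodes the rendition width as a `/w_NNN` path segment:

import re
from bs4 import BeautifulSoup

def shrink_images(soup, w='/w_320'):
    # Swap whatever /w_NNN width segment an image URL carries for the
    # requested one; URLs without such a segment are left untouched.
    for img in soup.findAll('img'):
        src = img.get('src')
        if src:
            img['src'] = re.sub(r'/w_\d+', w, src)
    return soup

# Example: a 1280px rendition becomes the 320px one.
s = BeautifulSoup('<img src="https://cdn.example.com/w_1280/photo.jpg">', 'html.parser')
print(shrink_images(s))  # <img src="https://cdn.example.com/w_320/photo.jpg"/>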
The Outlook India recipe:

@@ -1,6 +1,3 @@
-import json
-import re
-
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
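Both recipes lean on the `classes` helper imported here. In calibre.web.feeds.news it builds a tag matcher that accepts any element carrying at least one of the given space-separated CSS class names; roughly (a paraphrase, not the verbatim calibre source):

def classes(names):
    # Build a soup attrs matcher: a tag matches when its class list
    # shares at least one name with the space-separated list given.
    q = frozenset(names.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})

So `keep_only_tags = [classes('story-slug story-title')]` keeps every element whose class attribute mentions either name.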
@@ -18,68 +15,57 @@ class outlook(BasicNewsRecipe):
     remove_attributes = ['height', 'width', 'style']
     ignore_duplicate_articles = {'url'}
     resolve_internal_links = True
+    masthead_url = 'https://images.assettype.com/outlookindia/2024-02/96fb06ce-1cc8-410e-ad6c-da4de57405f8/Outlook.svg'
     extra_css = '''
-        .story-summary{font-style:italic; color:#202020;}
-        .author_wrapper, .relatedCategory{font-size:small; color:#404040;}
-        #figcap{font-size:small; text-align:center;}
+        .subcap-story {font-style:italic; color:#202020;}
+        .story-slug, .article-name-date {font-size:small; color:#404040;}
+        .main-img-div, .sb-image {font-size:small; text-align:center;}
+        em { color:#202020; }
     '''
 
-    keep_only_tags = [classes('__story_detail')]
+    keep_only_tags = [
+        # classes('story-slug story-title subcap-story article-name-date main-img-div sb-article')
+        classes('story-slug story-title subcap-story article-name-date w-93')
+    ]
 
     remove_tags = [
-        classes(
-            'social_sharing_article left_trending left-sticky __tag_links next_prev_stories '
-            'downarrow uparrow more_from_author_links next prev __related_stories_thumbs home_ad_title'
-        )
+        dict(name='svg'),
+        dict(name='a', attrs={'href':lambda x: x and x.startswith('https://www.whatsapp.com/')}),
+        classes('ads-box info-img-absolute mobile-info-id story-dec-time-mobile')
     ]
 
     def get_browser(self):
         return BasicNewsRecipe.get_browser(self, user_agent='common_words/based', verify_ssl_certificates=False)
 
     def parse_index(self):
+        self.log(
+            '\n***\nif this recipe fails, report it on: '
+            'https://www.mobileread.com/forums/forumdisplay.php?f=228\n***\n'
+        )
         soup = self.index_to_soup('https://www.outlookindia.com/magazine')
-        div = soup.find('div', attrs={'class':'wrapper'})
-        a = div.find('a', href=lambda x: x and x.startswith('/magazine/issue/'))
+        a = soup.find('a', attrs={'aria-label':'magazine-cover-image'})
+        self.cover_url = a.img['src'].split('?')[0]
         url = a['href']
-        self.timefmt = ' [' + self.tag_to_string(a.find('p')).strip() + ']'
+        self.description = self.tag_to_string(a)
+        self.timefmt = ' [' + self.tag_to_string(a.div).strip() + ']'
         self.log('Downloading issue:', url, self.timefmt)
-        soup = self.index_to_soup('https://www.outlookindia.com' + url)
-        cover = soup.find(**classes('listingPage_lead_story'))
-        self.cover_url = cover.find('img', attrs={'src': True})['src']
+        soup = self.index_to_soup(url)
         ans = []
 
-        for h3 in soup.findAll(['h3', 'h4'],
-                attrs={'class': 'tk-kepler-std-condensed-subhead'}):
-            a = h3.find('a', href=True)
+        for div in soup.findAll(attrs={'class': 'article-heading-two'}):
+            a = div.a
             url = a['href']
             title = self.tag_to_string(a)
             desc = ''
-            p = h3.find_next_sibling('p')
+            p = div.find_next_sibling('p', attrs={'class':lambda x: x and 'article-desc' in x.split()})
             if p:
                 desc = self.tag_to_string(p)
+            auth = div.find_next_sibling('p', attrs={'class':'author'})
+            if auth:
+                desc = self.tag_to_string(auth) + ' | ' + desc
             self.log('\t', title)
             self.log('\t', desc)
             self.log('\t\t', url)
             ans.append({'title': title, 'url': url, 'description': desc})
         return [('Articles', ans)]
 
-    def preprocess_html(self, soup):
-        for fig in soup.findAll('figure'):
-            fig['id'] = 'figcap'
-        return soup
-
-    def preprocess_raw_html(self, raw, *a):
-        return raw
-        m = re.search('<!-- NewsArticle Schema -->.*?script.*?>', raw, flags=re.DOTALL)
-        raw = raw[m.end():].lstrip()
-        data = json.JSONDecoder().raw_decode(raw)[0]
-        title = data['headline']
-        body = data['articleBody']
-        body = body.replace('\r\n', '<p>')
-        author = ' and '.join(x['name'] for x in data['author'])
-        image = desc = ''
-        if data.get('image'):
-            image = '<p><img src="{}">'.format(data['image']['url'])
-        if data.get('description'):
-            desc = '<h2>' + data['description'] + '</h2>'
-        html = '<html><body><h1>' + title + '</h1>' + desc + '<h3>' + author + '</h3>' + image + '<p>' + body
-        return html
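The reworked `parse_index` now locates the cover through an `aria-label` attribute instead of walking a wrapper div. A minimal illustration of the new lookup against hypothetical markup shaped like what the code expects (the fragment below is an assumption for demonstration, not a capture of the live page):

from bs4 import BeautifulSoup

# Hypothetical fragment in the shape the new code expects.
html = '''
<a aria-label="magazine-cover-image" href="https://www.outlookindia.com/magazine/issue/example">
  <img src="https://images.assettype.com/outlookindia/cover.jpg?w=400">
  <div>27 May 2024</div>
</a>
'''
soup = BeautifulSoup(html, 'html.parser')
a = soup.find('a', attrs={'aria-label': 'magazine-cover-image'})
cover_url = a.img['src'].split('?')[0]   # strip the resize query -> .../cover.jpg
issue_url = a['href']                    # issue page fetched next
issue_date = a.div.get_text(strip=True)  # feeds self.timefmt

Each article entry it appends is a plain dict with `title`, `url`, and `description` keys, and the method returns them as a single `('Articles', ans)` section, which is the feed structure calibre expects from `parse_index`.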