commit 0a6e387483
Kovid Goyal, 2024-08-04 10:12:25 +05:30
No known key found for this signature in database (GPG Key ID: 06BC317B515ACE7C)
9 changed files with 20 additions and 66 deletions


@@ -68,10 +68,10 @@ class Bloomberg(BasicNewsRecipe):
     }
     remove_tags = [
-        dict(name=['button', 'svg', 'meta']),
+        dict(name=['button', 'svg', 'meta', 'iframe']),
         dict(name='div', attrs={'id':['bb-that', 'bb-nav']}),
         dict(attrs={'data-image-type':'audio'}),
-        classes('twitter-logo bb-global-footer __sticky__audio__bar__portal__ css--social-wrapper-outer')
+        classes('twitter-logo bb-global-footer __sticky__audio__bar__portal__ css--social-wrapper-outer bplayer-container')
     ]
     extra_css = '''


@@ -1,3 +1,6 @@
+#!/usr/bin/env python
+# vim:fileencoding=utf-8
 import json
 import time
 from datetime import datetime, timedelta
@@ -61,10 +64,10 @@ class Bloomberg(BasicNewsRecipe):
     cover_url = 'https://assets.bwbx.io/images/users/iqjWHBFdfxIU/ivUxvlPidC3M/v0/600x-1.jpg'
     remove_tags = [
-        dict(name=['button', 'svg', 'meta']),
+        dict(name=['button', 'svg', 'meta', 'iframe']),
         dict(name='div', attrs={'id':['bb-that', 'bb-nav']}),
         dict(attrs={'data-image-type':'audio'}),
-        classes('twitter-logo bb-global-footer __sticky__audio__bar__portal__ css--social-wrapper-outer')
+        classes('twitter-logo bb-global-footer __sticky__audio__bar__portal__ css--social-wrapper-outer bplayer-container')
     ]
     extra_css = '''
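
Both Bloomberg hunks make the same cleanup: 'iframe' joins the tag names that are stripped outright, and the 'bplayer-container' audio-player wrapper is added to the class-based filter. A minimal sketch of the remove_tags pattern these recipes rely on; the recipe name and title here are placeholders, not part of the commit:

```python
from calibre.web.feeds.news import BasicNewsRecipe, classes


class Example(BasicNewsRecipe):
    # Placeholder recipe illustrating the filter used by the Bloomberg recipes above.
    title = 'Example'

    remove_tags = [
        # Drop these elements wherever they appear, including embedded <iframe> players.
        dict(name=['button', 'svg', 'meta', 'iframe']),
        # classes() matches any element carrying one of the listed CSS classes.
        classes('twitter-logo bplayer-container'),
    ]
```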


@@ -34,7 +34,7 @@ class TheHindu(BasicNewsRecipe):
     recipe_specific_options = {
         'location': {
             'short': 'The name of the local edition',
-            'long': ('If The Hindu is available in your local town/city,\n'
+            'long': ('If The Hindu is available in your local town/city, '
                      'set this to your location, for example, hyderabad\n'
                      'Available Editions: bengaluru, chennai, coimbatore, delhi, '
                      'erode, hyderabad, international, kochi, kolkata,\n'


@@ -26,7 +26,7 @@ class ht(BasicNewsRecipe):
     recipe_specific_options = {
         'location': {
             'short': 'The name of the local edition',
-            'long': ('If The Hindustan Times is available in your local town/city,\n'
+            'long': ('If The Hindustan Times is available in your local town/city, '
                      'set this to your location, for example, Delhi\nAvailable Editions:'
                      'Delhi, Mumbai, Chandigarh, Lucknow, Patna, Bengaluru, Pune, Gurgaon,'
                      'Ludhiana, Rajasthan, Amritsar,\nEast UP, Haryana, Jammu, Navi Mumbai,'
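
The Hindu and Hindustan Times hunks only re-wrap the help text of the 'location' recipe-specific option. For context, a hedged sketch of how a recipe typically reads such an option at run time; the class name, fallback edition and feed URL are illustrative, not taken from the diff:

```python
from calibre.web.feeds.news import BasicNewsRecipe


class EditionExample(BasicNewsRecipe):
    # Hypothetical recipe showing how a 'location' option is consumed.
    title = 'Edition example'
    recipe_specific_options = {
        'location': {
            'short': 'The name of the local edition',
            'long': 'For example, hyderabad',
        }
    }

    def get_feeds(self):
        edition = 'international'  # fallback when the user sets nothing
        d = self.recipe_specific_options.get('location')
        # Recipes guard with isinstance(): without a user-supplied value, .get()
        # may hand back the option's declaration rather than a string.
        if d and isinstance(d, str):
            edition = d.lower()
        # Placeholder feed URL built from the chosen edition.
        return [('Front page', 'https://example.com/%s/feed.xml' % edition)]
```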

Binary file not shown. (Before: 377 B)


@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+# vim:fileencoding=utf-8
 from datetime import datetime, timedelta
 from calibre.utils.date import parse_date
@@ -116,10 +118,10 @@ class IndianExpress(BasicNewsRecipe):
     def get_cover_url(self):
         soup = self.index_to_soup(
-            'https://www.magzter.com/IN/The-Indian-Express-Ltd./The-Indian-Express-Mumbai/Newspaper/'
+            'https://www.readwhere.com/newspaper/indian-express/Nagpur/38726'
         )
-        for citem in soup.findAll('meta', content=lambda s: s and s.endswith('view/3.jpg')):
-            return citem['content']
+        citem = soup.find('meta', attrs={'property':'og:image'})
+        return citem['content'].replace('300', '600')
     def preprocess_html(self, soup):
         if h2 := soup.find('h2'):
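
The Indian Express cover is now taken from the readwhere page's og:image meta tag, upscaled by swapping '300' for '600' in the image URL. A standalone sketch of the same lookup outside calibre, using requests and BeautifulSoup in place of index_to_soup (assumed substitutes, not part of the recipe):

```python
import requests
from bs4 import BeautifulSoup


def cover_from_readwhere(url='https://www.readwhere.com/newspaper/indian-express/Nagpur/38726'):
    # Fetch the edition page and read its og:image meta tag.
    soup = BeautifulSoup(requests.get(url, timeout=30).text, 'html.parser')
    citem = soup.find('meta', attrs={'property': 'og:image'})
    if citem is None:
        return None
    # The recipe assumes the image URL contains '300'; replacing it with '600'
    # requests a larger rendition for the cover.
    return citem['content'].replace('300', '600')
```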


@@ -21,9 +21,9 @@ def resize(x):
     return v
 m_fr = {
-    1: 'janvier', 2: 'février', 3: 'mars', 4: 'avril',
-    5: 'mai', 6: 'juin', 7: 'juillet', 8: 'août',
-    9: 'septembre', 10: 'octobre', 11: 'novembre', 12: 'décembre'
+    1: 'Janvier', 2: 'Février', 3: 'Mars', 4: 'Avril',
+    5: 'Mai', 6: 'Juin', 7: 'Juillet', 8: 'Août',
+    9: 'Septembre', 10: 'Octobre', 11: 'Novembre', 12: 'Décembre'
 }
 def json_to_html(raw):
@@ -34,7 +34,7 @@ def json_to_html(raw):
     auth = '<p class="auth">{}</p>\n'
     dt = datetime.fromisoformat(data['last_updated_date'][:-1]) + timedelta(seconds=time.timezone)
-    dt = dt.strftime('%d ' + m_fr[dt.month] + ' %Y')
+    dt = dt.strftime(m_fr[dt.month] + ' %d, %Y')
     a = [x['name'] for x in data['credits']['by']]
     if a:
         auth = auth.format(', '.join(a) + ' | ' + dt)
@@ -86,6 +86,7 @@ class Liberation(BasicNewsRecipe):
     oldest_article = 1.15
     remove_empty_feeds = True
     articles_are_obfuscated = True
+    timefmt = '[ %s]' % datetime.now().strftime(m_fr[datetime.now().month] + ' %d, %Y')
     ignore_duplicate_articles = {'title', 'url'}
     key = 'ZWplZVBlaW5nZWl0YWVnaG8zd2VlbmdlZXlvaHB1'
     masthead_url = 'https://journal.liberation.fr/img/logo.svg'
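
Libération dates move from the '04 août 2024' form to an 'Août 04, 2024' form: the capitalized French month name is concatenated into the strftime pattern, presumably to avoid the locale-dependent %B. A small sketch of the formatting, reusing the m_fr table from the diff:

```python
from datetime import datetime

# French month names, as in the updated recipe.
m_fr = {
    1: 'Janvier', 2: 'Février', 3: 'Mars', 4: 'Avril',
    5: 'Mai', 6: 'Juin', 7: 'Juillet', 8: 'Août',
    9: 'Septembre', 10: 'Octobre', 11: 'Novembre', 12: 'Décembre'
}

dt = datetime(2024, 8, 4)
# The month name is spliced into the format string; %d and %Y fill in day and year.
print(dt.strftime(m_fr[dt.month] + ' %d, %Y'))  # Août 04, 2024
```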


@@ -1,52 +0,0 @@
-from calibre.web.feeds.news import BasicNewsRecipe
-class Athletic(BasicNewsRecipe):
-    title = u'The Athletic'
-    __author__ = 'unkn0wn'
-    description = 'The Athletic delivers powerful stories and smart analysis that bring sports fans closer to the heart of the game. From breaking news and live commentary, to deeply-reported long reads and exclusive interviews, subscribers rely on The Athletic for every sports story that matters.' # noqa
-    masthead_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/9/95/The_Athletic_wordmark_black_2020.svg/640px-The_Athletic_wordmark_black_2020.svg.png' # noqa
-    language = 'en'
-    oldest_article = 1.15 # days
-    max_articles_per_feed = 50
-    encoding = 'utf-8'
-    use_embedded_content = False
-    no_stylesheets = True
-    remove_attributes = ['style', 'height', 'width']
-    ignore_duplicate_articles = {'url'}
-    remove_empty_feeds = True
-    extra_css = '''
-        #articleByLineString{font-size:small;}
-        .inline-credits{font-size:small; text-align:center;}
-    '''
-    keep_only_tags = [
-        dict(name='amp-img', attrs={'class': 'i-amphtml-layout-fill'}),
-        dict(name='div', attrs={'class': ['the-lead-article', 'article-container']})
-    ]
-    remove_tags = [
-        dict(name='i-amphtml-sizer')
-    ]
-    feeds = [
-        ('The Athletic Ink', 'https://theathletic.com/ink/?rss'),
-        ('Football', 'https://theathletic.com/football/?rss'),
-        ('Boxing', 'https://theathletic.com/boxing/?rss'),
-        ('MMA', 'https://theathletic.com/mma/?rss'),
-        ('Motorsports', 'https://theathletic.com/motorsports/?rss'),
-        ('NBA', 'https://theathletic.com/nba/?rss'),
-        ('NHL', 'https://theathletic.com/nhl/?rss'),
-        ('Olympics', 'https://theathletic.com/olympics/?rss'),
-        ('Culture', 'https://theathletic.com/culture/?rss'),
-        ('Others', 'https://theathletic.com/rss-feed/'), # All Articles
-        # just add '/?rss' to the sections you'd like to get.. there's too many
-    ]
-    def preprocess_html(self, soup):
-        for img in soup.findAll('amp-img'):
-            if not img.find('img'):
-                img.name = 'img'
-        return soup
-    def print_version(self, url):
-        reset = url.split('?')[0] + '?amp=1'
-        return reset
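
The removed Athletic recipe worked off AMP pages: print_version appended '?amp=1' to each article URL and preprocess_html renamed <amp-img> wrappers to plain <img> so images survive conversion. A minimal standalone sketch of that technique, with bs4 standing in for calibre's soup and a hypothetical article URL:

```python
from bs4 import BeautifulSoup


def print_version(url):
    # Request the AMP rendering of an article, as the deleted recipe did.
    return url.split('?')[0] + '?amp=1'


def unwrap_amp_images(html):
    # Rename <amp-img> tags that do not already wrap a plain <img>.
    soup = BeautifulSoup(html, 'html.parser')
    for img in soup.findAll('amp-img'):
        if not img.find('img'):
            img.name = 'img'
    return soup


print(print_version('https://theathletic.com/ink/some-story/?source=rss'))  # hypothetical URL
# -> https://theathletic.com/ink/some-story/?amp=1
```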


@@ -28,7 +28,7 @@ class WSJ(BasicNewsRecipe):
     title = 'The Wall Street Journal'
     __author__ = 'unkn0wn'
     description = (
-        'The Wall Street Journal is your source for breaking news, analysis and insights from the U.S. and '
+        'The Print Edition of WSJ. The Wall Street Journal is your source for breaking news, analysis and insights from the U.S. and '
         'around the world, the world\'s leading business and finance publication.'
     )
     language = 'en_US'