Update The Week

remove google feeds
unkn0w7n 2024-08-26 19:54:12 +05:30
parent fc089efbd0
commit aadbf63c9c
2 changed files with 46 additions and 66 deletions
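
Both recipes previously built their feed list from Google News RSS queries and then resolved each news.google.com redirect in get_obfuscated_article. This commit replaces that indirection with a parse_index that scrapes theweek.com's own archive page. Below is a minimal standalone sketch of the new indexing logic, assuming the archive page keeps the archive__list / archive__item--heading markup the recipe targets; urllib and BeautifulSoup stand in for calibre's index_to_soup, and the User-Agent header is a guess at what the site accepts.

# Sketch only: mirrors the recipe's new parse_index outside calibre.
# Assumes the archive markup classes below are still current.
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup

req = Request('https://theweek.com/archive', headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(urlopen(req).read(), 'html.parser')
archive = soup.find('ul', attrs={'class': 'archive__list'})

feeds = []
# Section headings and their article lists alternate as <li> siblings.
for li in archive.find_all('li', class_='archive__item--heading')[:7]:
    section = li.get_text(strip=True)
    articles = []
    for a in li.find_next('li').ul.find_all('a', href=True):
        if '/puzzles/' in a['href']:
            continue  # puzzle pages are not articles
        articles.append({'title': a.get_text(strip=True), 'url': a['href']})
    feeds.append((section, articles))

The recipe itself does the same work with calibre's index_to_soup and tag_to_string, as the diffs below show.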

View File

@@ -1,8 +1,7 @@
 #!/usr/bin/env python
 
 '''
 www.theweek.com
 '''
-from urllib.parse import quote
 
 from calibre.web.feeds.news import BasicNewsRecipe, classes
@@ -24,9 +23,6 @@ class TheWeek(BasicNewsRecipe):
     ignore_duplicate_articles = {'title', 'url'}
     remove_empty_feeds = True
     resolve_internal_links = True
-    simultaneous_downloads = 1
-    oldest_article = 7  # days
-    web_url = ''
 
     extra_css = '''
         img {display:block; margin:0 auto;}
@@ -45,21 +41,6 @@ class TheWeek(BasicNewsRecipe):
             if '-cover-' in x['image']:
                 return 'https://usmagazine.theweek.com' + x['image'][1:]
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        soup = self.index_to_soup(url)
-        link = soup.a['href']
-        skip_sections = [  # add sections you want to skip
-            '/video/', '/videos/', '/multimedia/',
-        ]
-        if any(x in link for x in skip_sections):
-            self.abort_article('skipping video links ', link)
-        self.web_url = link
-        html = br.open(link).read()
-        return {'data': html, 'url': link}
-
     keep_only_tags = [
         classes('article-type__breadcrumb header__title header__strapline image image--hero author-byline__author-text article__body')
     ]
@@ -76,22 +57,31 @@ class TheWeek(BasicNewsRecipe):
                 img['src'] = img['data-pin-media'].replace('.jpg', '-768-80.jpg')
         return soup
 
-    feeds = []
-    when = oldest_article * 24
-    index = 'https://theweek.com/'
-    sections = [
-        'politics', 'news', 'cartoons', 'tech', 'science', 'health',
-        'culture-life', 'business', 'travel', 'arts-life', 'history'
-    ]
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-US&gl=US&ceid=US:en'
-        feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
-    feeds.append(('Others', a.format(when, quote(index, safe=''))))
+    def parse_index(self):
+        soup = self.index_to_soup('https://theweek.com/archive')
+        list = soup.find('ul', attrs={'class': 'archive__list'})
+
+        feeds = []
+
+        for li in list.findAll('li', **classes('archive__item--heading'))[:7]:
+            section = self.tag_to_string(li)
+            self.log(section)
+
+            articles = []
+            ul = li.findNext('li').ul
+            for a in ul.findAll('a', href=True):
+                url = a['href']
+                if '/puzzles/' in url:
+                    continue
+                title = self.tag_to_string(a)
+                self.log(' ', title, '\n\t', url)
+                articles.append({'title': title, 'url': url})
+            feeds.append((section, articles))
+        return feeds
 
     def populate_article_metadata(self, article, soup, first):
-        article.title = article.title.replace(' - The Week', '')
         desc = soup.find(**classes('header__strapline'))
         if desc:
            article.summary = self.tag_to_string(desc)
            article.text_summary = article.summary
-        article.url = self.web_url
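
For context, the classes() helper imported next to BasicNewsRecipe turns a space-separated list of CSS class names into keyword arguments for BeautifulSoup's find/findAll, matching any tag that carries at least one of the names. Its calibre implementation is roughly the following sketch (paraphrased from memory, not a verbatim copy):

# Sketch of calibre.web.feeds.news.classes: returns {'attrs': {'class': matcher}}
# so that soup.findAll('li', **classes('archive__item--heading')) matches any
# tag whose class attribute shares at least one name with the query.
def classes(class_names):
    q = frozenset(class_names.split(' '))
    return dict(attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)})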

View File

@@ -1,8 +1,7 @@
 #!/usr/bin/env python
 
 '''
 www.theweek.com
 '''
-from urllib.parse import quote
 
 from calibre.web.feeds.news import BasicNewsRecipe, classes
@@ -24,9 +23,6 @@ class TheWeek(BasicNewsRecipe):
     ignore_duplicate_articles = {'title', 'url'}
     remove_empty_feeds = True
     resolve_internal_links = True
-    simultaneous_downloads = 1
-    oldest_article = 7  # days
-    web_url = ''
 
     extra_css = '''
         img {display:block; margin:0 auto;}
@@ -45,21 +41,6 @@ class TheWeek(BasicNewsRecipe):
             if '-cover-' in x['image']:
                 return 'https://ukmagazine.theweek.com' + x['image'][1:]
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        soup = self.index_to_soup(url)
-        link = soup.a['href']
-        skip_sections = [  # add sections you want to skip
-            '/video/', '/videos/', '/multimedia/',
-        ]
-        if any(x in link for x in skip_sections):
-            self.abort_article('skipping video links ', link)
-        self.web_url = link
-        html = br.open(link).read()
-        return {'data': html, 'url': link}
-
     keep_only_tags = [
         classes('article-type__breadcrumb header__title header__strapline image image--hero author-byline__author-text article__body')
     ]
@@ -76,22 +57,31 @@ class TheWeek(BasicNewsRecipe):
                 img['src'] = img['data-pin-media'].replace('.jpg', '-768-80.jpg')
         return soup
 
-    feeds = []
-    when = oldest_article * 24
-    index = 'https://theweek.com/'
-    sections = [
-        'politics', 'news', 'cartoons', 'tech', 'science', 'health',
-        'culture-life', 'business', 'travel', 'arts-life', 'history'
-    ]
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-GB&gl=GB&ceid=GB:en'
-        feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
-    feeds.append(('Others', a.format(when, quote(index, safe=''))))
+    def parse_index(self):
+        soup = self.index_to_soup('https://theweek.com/archive')
+        list = soup.find('ul', attrs={'class': 'archive__list'})
+
+        feeds = []
+
+        for li in list.findAll('li', **classes('archive__item--heading'))[:7]:
+            section = self.tag_to_string(li)
+            self.log(section)
+
+            articles = []
+            ul = li.findNext('li').ul
+            for a in ul.findAll('a', href=True):
+                url = a['href']
+                if '/puzzles/' in url:
+                    continue
+                title = self.tag_to_string(a)
+                self.log(' ', title, '\n\t', url)
+                articles.append({'title': title, 'url': url})
+            feeds.append((section, articles))
+        return feeds
 
     def populate_article_metadata(self, article, soup, first):
-        article.title = article.title.replace(' - The Week', '')
        desc = soup.find(**classes('header__strapline'))
        if desc:
            article.summary = self.tag_to_string(desc)
            article.text_summary = article.summary
-        article.url = self.web_url