Change Google-feeds-based recipes to parse their front pages instead.

commit 69ba1b60c2 (parent 31be5f3708)
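Every recipe touched below follows the same migration: the Google News RSS feed definitions and the articles_are_obfuscated / get_obfuscated_article redirect-resolving machinery are dropped, and a parse_index method scrapes article links straight off the site's front page. A minimal sketch of the shared pattern, distilled from the diffs that follow (the domain and section names here are placeholders, not code from this commit):

    from calibre.web.feeds.news import BasicNewsRecipe


    class sample(BasicNewsRecipe):
        title = 'Sample front-page recipe'

        def parse_index(self):
            index = 'https://example.com/'
            sections = ['news', 'world']
            feeds = []
            soup = self.index_to_soup(index)
            for sec in sections:
                articles = []
                # collect every anchor that points into this section of the site
                for a in soup.findAll('a', attrs={'href': lambda x: x and x.startswith('/' + sec + '/')}):
                    url = 'https://example.com' + a['href'].split('?')[0]
                    if url in {index + sec + '/', index + sec}:
                        continue  # the section landing page itself, not an article
                    articles.append({'title': self.tag_to_string(a), 'url': url})
                if articles:
                    feeds.append((sec.capitalize(), articles))
            return feeds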
@@ -1,6 +1,10 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe
+from datetime import date
 
+def absurl(url):
+    if url.startswith('/'):
+        return 'https://www.afr.com' + url
 
 class afr(BasicNewsRecipe):
     title = 'Australian Financial Review'
@@ -40,34 +44,10 @@ class afr(BasicNewsRecipe):
         [data-testid="AuthorNames"], [data-testid="ArticleTimestamp"] {font-size:small;}
     '''
 
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
     resolve_internal_links = True
     remove_empty_feeds = True
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/', 'podcast-'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
     def preprocess_html(self, soup):
         for img in soup.findAll('img', attrs={'data-src':True}):
             img['src'] = img['data-src']
@@ -87,3 +67,29 @@ class afr(BasicNewsRecipe):
             a = 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fwww.afr.com{}&hl=en-AU&gl=AU&ceid=AU:en'
             feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
         feeds.append(('Others', a.format('')))
+
+    def parse_index(self):
+        index = 'https://www.afr.com/'
+        sections = [
+            'companies', 'market', 'politics', 'policy', 'world', 'wealth', 'street-talk',
+            'chaticleer', 'rear-window', 'life-and-luxury', 'technology', 'property',
+            'work-and-careers',
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith('/' + sec + '/')}):
+                url = absurl(a['href'].split('?')[0])
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                if date.today().strftime('%Y') not in url:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,4 +1,4 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, prefixed_classes
 
 
@@ -38,37 +38,24 @@ class bar(BasicNewsRecipe):
     resolve_internal_links = True
     remove_empty_feeds = True
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/', 'podcast-'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
-    feeds = []
-
-    sections = [
-        'news', 'columns', 'interviews', 'law-firms', 'apprentice-lawyer', 'legal-jobs'
-    ]
-
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:27h+allinurl:barandbench.com{}&hl=en-IN&gl=IN&ceid=IN:en'
-        feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
-    feeds.append(('Others', a.format('')))
+    def parse_index(self):
+        index = 'https://www.barandbench.com/'
+        sections = [
+            'news', 'columns', 'interviews', 'law-firms', 'apprentice-lawyer', 'legal-jobs'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + sec + '/')}):
+                url = a['href'].split('?')[0]
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,7 +1,10 @@
-from calibre.ptempfile import PersistentTemporaryFile
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
+def absurl(url):
+    if url.startswith('/'):
+        return 'https://www.deccanherald.com' + url
+
 class herald(BasicNewsRecipe):
     title = 'Deccan Herald'
     __author__ = 'unkn0wn'
@@ -12,31 +15,6 @@ class herald(BasicNewsRecipe):
     ignore_duplicate_articles = {'url', 'title'}
     encoding = 'utf-8'
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/bengaluru-crime/', '/metrolife/',
-            '/karnataka-districts/', '/brandspot/', '/entertainment/',
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping section')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
     keep_only_tags = [
         classes('article-title article-author__name'),
         dict(name='div', attrs={'id':'main-content'})
@@ -51,16 +29,25 @@ class herald(BasicNewsRecipe):
         )
     ]
 
-    feeds = [
-        ('Nation', 'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com%2Fnational%2F&hl=en-IN&gl=IN&ceid=IN:en'),
-        ('Karnataka', 'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com%2Fstate%2F&hl=en-IN&gl=IN&ceid=IN:en'),
-        ('Opinion', 'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com%2Fopinion%2F&hl=en-IN&gl=IN&ceid=IN:en'),
-        ('City',
-         'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com%2Fcity%2F&hl=en-IN&gl=IN&ceid=IN:en'),
-        ('Business', 'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com%2Fbusiness%2F&hl=en-IN&gl=IN&ceid=IN:en'),
-        ('World',
-         'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com%2Finternational%2F&hl=en-IN&gl=IN&ceid=IN:en'),
-        ('Sports',
-         'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com%2Fsports%2F&hl=en-IN&gl=IN&ceid=IN:en'),
-        ('Others', 'https://news.google.com/rss/search?q=when:27h+allinurl:deccanherald.com&hl=en-IN&gl=IN&ceid=IN:en'),
-    ]
+    def parse_index(self):
+        index = 'https://www.deccanherald.com/'
+        sections = [
+            'india', 'world', 'elections', 'opinion', 'specials', 'business', 'sports'
+        ]
+        feeds = []
+        for sec in sections:
+            soup = self.index_to_soup(index + sec)
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith('/' + sec + '/')}):
+                url = absurl(a['href'].split('?')[0])
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,9 +1,6 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
-# Firstpost feeds mix sections into other feeds, like explainers end up in opinion feed and opinions end up in India feed.
-# change google_feeds to True to fetch right sections.
-google_feeds = False
-
 class firstpost(BasicNewsRecipe):
     title = 'Firstpost'
@@ -43,35 +40,11 @@ class firstpost(BasicNewsRecipe):
         'world', 'web-stories', 'tech', 'artandculture', 'health', 'health-supplement',
         # 'photos', 'entertainment', 'living', 'education', 'sports', 'firstcricket',
     ]
-    if not google_feeds:
-        oldest_article = 1.2 # days
-        for sec in sections:
-            a = 'https://www.firstpost.com/rss/{}.xml'
-            feeds.append((sec.capitalize(), a.format(sec)))
-    else:
-        articles_are_obfuscated = True
-
-        def get_obfuscated_article(self, url):
-            br = self.get_browser()
-            soup = self.index_to_soup(url)
-            link = soup.find('a', href=True)
-            skip_sections =[ # add sections you want to skip
-                '/video/', '/videos/', '/media/', '/vantage/'
-            ]
-            if any(x in link['href'] for x in skip_sections):
-                self.log('Aborting Article ', link['href'])
-                self.abort_article('skipping video links')
-            self.log('Downloading ', link['href'])
-            html = br.open(link['href']).read()
-            pt = PersistentTemporaryFile('.html')
-            pt.write(html)
-            pt.close()
-            return pt.name
-
-        for sec in sections:
-            a = 'https://news.google.com/rss/search?q=when:27h+allinurl:firstpost.com{}&hl=en-IN&gl=IN&ceid=IN:en'
-            feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
-            # feeds.append(('Others', a.format('')))
+    oldest_article = 1.2 # days
+    for sec in sections:
+        a = 'https://www.firstpost.com/rss/{}.xml'
+        feeds.append((sec.capitalize(), a.format(sec)))
 
     def preprocess_html(self, soup):
         if h2 := soup.find('h2', attrs={'class':'category-name'}):
@@ -1,10 +1,13 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 '''
 Hamilton Spectator Calibre Recipe
 '''
 
+def absurl(url):
+    if url.startswith('/'):
+        return 'https://www.thespec.com' + url
 
 class HamiltonSpectator(BasicNewsRecipe):
     title = u'Hamilton Spectator'
@@ -21,8 +24,7 @@ class HamiltonSpectator(BasicNewsRecipe):
     remove_attributes = ['style', 'height', 'width']
     masthead_url = 'https://bloximages.chicago2.vip.townnews.com/thespec.com/content/tncms/custom/image/c0094646-1108-11ee-8af0-b3954ce40e5e.png'
 
-    ignore_duplicate_articles = {'title'}
-    articles_are_obfuscated = True
+    ignore_duplicate_articles = {'title', 'url'}
 
     extra_css = '''
         .caption { font-size:small; text-align:center; }
@@ -52,35 +54,26 @@ class HamiltonSpectator(BasicNewsRecipe):
                 img['src'] = x.split()[0]
         return soup
 
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/', 'podcast'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
-    feeds = []
-
-    sections = [
-        'news', 'politics', 'opinion', 'business', 'sports', 'life', 'entertainment'
-    ]
-
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:27h+allinurl:thespec.com{}&hl=en-CA&gl=IN&ceid=CA:en'
-        feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
-    feeds.append(('Others', a.format('')))
+    def parse_index(self):
+        index = 'https://www.thespec.com/'
+        sections = [
+            'news', 'politics', 'opinion', 'business', 'sports', 'life', 'entertainment'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith('/' + sec + '/')}):
+                url = absurl(a['href'].split('#')[0])
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                if not url.endswith('.html'):
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,4 +1,4 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -27,48 +27,38 @@ class hindutamil(BasicNewsRecipe):
         classes('newsbot-ads article-details-ads-inner art-follow-title1 dont-miss-it')
     ]
 
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
     remove_empty_feeds = True
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
-    feeds = []
-
-    sections = [
-        ('தமிழகம்', 'tamilnadu'),
-        ('இந்தியா', 'india'),
-        ('கருத்துப் பேழை', 'opinion'),
-        ('உலகம்', 'world'),
-        ('வணிகம்', 'business'),
-        # ('விளையாட்டு', 'sports'),
-        # ('தமிழ் சினிமா', 'cinema'),
-        ('தொழில்நுட்பம்', 'technology'),
-        # ('இணைப்பிதழ்கள்', 'supplements'),
-    ]
-
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:27h+allinurl:hindutamil.in%2Fnews{}&hl=ta-IN&gl=IN&ceid=IN:ta'
-        feeds.append((sec[0], a.format('%2F' + sec[1] + '%2F')))
-        # feeds.append(('Others', a.format('')))
+    def parse_index(self):
+        index = 'https://www.hindutamil.in/'
+        sections = [
+            ('தமிழகம்', 'tamilnadu'),
+            ('இந்தியா', 'india'),
+            ('கருத்துப் பேழை', 'opinion'),
+            ('உலகம்', 'world'),
+            ('வணிகம்', 'business'),
+            ('விளையாட்டு', 'sports'),
+            ('தமிழ் சினிமா', 'cinema'),
+            ('தொழில்நுட்பம்', 'technology'),
+            ('இணைப்பிதழ்கள்', 'supplements'),
+            ('Cartoon', 'cartoon'),
+            ('Life-style', 'life-style')
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        index = index + 'news/'
+        for sec in sections:
+            section = sec[0]
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + sec[1] + '/')}):
+                url = a['href']
+                if url in {index + sec[1] + '/', index + sec[1]}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,7 +1,11 @@
+#!/usr/bin/env python
 import json
 
 from calibre.web.feeds.news import BasicNewsRecipe
 
+def absurl(url):
+    if url.startswith('/'):
+        return 'https://www.infzm.com' + url
 
 def json_to_html(raw, link):
     data = json.loads(raw)
@@ -21,24 +25,19 @@ class infzm(BasicNewsRecipe):
     encoding = 'utf-8'
     no_stylesheets = True
     remove_javascript = True
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
     remove_empty_feeds = True
     use_embedded_content = False
     remove_attributes = ['style', 'height', 'width']
     masthead_url = 'http://ssimg.kkod.cn/web/02/14227.gif'
 
-    articles_are_obfuscated = True
-
     remove_tags = [dict(name=['video', 'svg', 'button'])]
 
+    articles_are_obfuscated = True
+
     def get_obfuscated_article(self, url):
         br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)['href'].split('?')[0]
+        link = url
         res_link = link.replace('https://www.infzm.com', 'https://api.infzm.com/mobile') \
             + '?platform=wap&version=1.89.0&machine_id=35458aa29603f2b246636e5492122b50&user_id=&token=&member_type='
         # if article is paywalled, add code to figure out machine_id
@@ -51,12 +50,29 @@ class infzm(BasicNewsRecipe):
         .cm_pic_caption, .cm_pic_author { font-size:small; text-align:center; }
     '''
 
-    feeds = [
-        ('南方周末', 'https://news.google.com/rss/search?q=when:170h+allinurl:https%3A%2F%2Fwww.infzm.com&hl=zh-HK&gl=HK&ceid=HK:zh')
-    ]
+    def parse_index(self):
+        index = 'https://www.infzm.com/'
+        sections = [
+            'contents'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith('/' + sec + '/')}):
+                url = absurl(a['href'].split('?')[0])
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
 
     def populate_article_metadata(self, article, soup, first):
-        article.title = article.title.replace(' - 南方周末', '')
-        article.url = soup.find('h1')['title']
-        article.summary = self.tag_to_string(soup.find(attrs={'class':'intro'}))
-        article.text_summary = self.tag_to_string(soup.find(attrs={'class':'intro'}))
+        if soup.find(attrs={'class':'intro'}):
+            article.summary = article.text_summary = self.tag_to_string(soup.find(attrs={'class':'intro'}))
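As the removed and rewritten blocks in these recipes show, calibre accepts two return conventions from get_obfuscated_article: a path to a temporary file holding the downloaded HTML, or a dict carrying the raw bytes together with the resolved URL. A minimal sketch of both conventions, distilled from the code in this diff (illustrative, not code from this commit):

    from calibre.ptempfile import PersistentTemporaryFile

    def get_obfuscated_article(self, url):
        br = self.get_browser()
        html = br.open(url).read()
        # convention used by the removed afr/herald code: return a temp-file path
        pt = PersistentTemporaryFile('.html')
        pt.write(html)
        pt.close()
        return pt.name
        # convention used by the livelaw/stheadline code: payload plus real URL
        # return {'data': html, 'url': url}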
@@ -1,4 +1,4 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -22,43 +22,31 @@ class inc42(BasicNewsRecipe):
         classes('also-read slick-list slides-three common-card'),
     ]
 
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
    remove_empty_feeds = True
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
-    feeds = []
-
-    sections = [
-        'features', 'buzz', 'startups', 'resources'
-    ]
-
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:27h+allinurl:inc42.com{}&hl=en-IN&gl=IN&ceid=IN:en'
-        feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
-    feeds.append(('Others', a.format('')))
+    def parse_index(self):
+        index = 'https://inc42.com/'
+        sections = [
+            'features', 'buzz', 'startups', 'resources'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + sec + '/')}):
+                url = a['href']
+                if url == index + sec + '/':
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
 
     def preprocess_html(self, soup):
         for img in soup.findAll('img', attrs={'data-src':True}):
@@ -1,7 +1,12 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
+from datetime import date
 
 
+def absurl(url):
+    if url.startswith('/'):
+        return 'https://www.irishtimes.com' + url
+
 class IrishTimes(BasicNewsRecipe):
     title = 'The Irish Times (free)'
     __author__ = 'unkn0wn'
@@ -24,9 +29,8 @@ class IrishTimes(BasicNewsRecipe):
     ]
 
     remove_attributes = ['width', 'height']
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
     resolve_internal_links = True
-    articles_are_obfuscated = True
 
     def get_cover_url(self):
         from datetime import date
@@ -45,28 +49,6 @@ class IrishTimes(BasicNewsRecipe):
             cover = None
         return cover
 
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/', '/podcast'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Found', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
     def __init__(self, *args, **kwargs):
         BasicNewsRecipe.__init__(self, *args, **kwargs)
         if self.output_profile.short_name.startswith('kindle'):
@@ -78,13 +60,28 @@ class IrishTimes(BasicNewsRecipe):
 
     feeds = []
 
-    sections = [
-        'ireland', 'world', 'opinion', 'politics', 'crime-law', 'culture', 'business',
-        'life-style', 'health', 'sport', 'property', 'food', 'abroad', 'environment',
-        'obituaries'
-    ]
-
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:27h+allinurl:irishtimes.com{}&hl=en-IE&gl=IE&ceid=IE:en'
-        feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
-    feeds.append(('Others', a.format('')))
+    def parse_index(self):
+        index = 'https://www.irishtimes.com/'
+        sections = [
+            'ireland', 'world', 'opinion', 'politics', 'crime-law', 'culture', 'business',
+            'life-style', 'health', 'sport', 'property', 'food', 'abroad', 'environment',
+            'obituaries'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith('/' + sec + '/')}):
+                url = absurl(a['href'].split('?')[0])
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                if date.today().strftime('%Y') not in url:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,7 +1,9 @@
-from urllib.parse import quote
+#!/usr/bin/env python
 
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
+def absurl(url):
+    if url.startswith('/'):
+        return 'https://www.livelaw.in' + url
 
 class livelaw(BasicNewsRecipe):
     title = 'Live Law'
@@ -20,29 +22,12 @@ class livelaw(BasicNewsRecipe):
     remove_javascript = True
     ignore_duplicate_articles = {'title', 'url'}
     simultaneous_downloads = 1
-    art_url = ''
 
     extra_css = '''
         .news_detail_person_detail {font-size:small; color:#202020;}
         .news-description { color:#202020; font-style:italic; }
     '''
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        soup = self.index_to_soup(url)
-        link = soup.a['href']
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/multimedia/',
-        ]
-        if any(x in link for x in skip_sections):
-            self.abort_article('skipping video links ', link)
-        self.log('Found ', link)
-        self.art_url = link
-        html = br.open(link).read()
-        return ({ 'data': html, 'url': link })
-
     keep_only_tags = [
         dict(name='div', attrs={'id':'page-content-wrapper'})
     ]
@@ -65,22 +50,27 @@ class livelaw(BasicNewsRecipe):
                 h2.name = 'p'
         return soup
 
-    feeds = []
-
-    when = '27' # hours
-    index = 'https://www.livelaw.in/'
-
-    sections = [
-        'top-stories', 'supreme-court', 'high-court', 'news-updates', 'consumer-cases', 'articles',
-        'lawschool', 'law-firms', 'round-ups'
-    ]
-
-    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'
-
-    for sec in sections:
-        feeds.append((sec.capitalize(), a.format(when, quote(index + sec, safe=''))))
-    feeds.append(('Others' , a.format(when, quote(index, safe=''))))
-
-    def populate_article_metadata(self, article, soup, first):
-        article.url = self.art_url
-        article.title = article.title.replace(' - Live Law - Indian Legal News', '')
+    def parse_index(self):
+        index = 'https://www.livelaw.in/'
+        sections = [
+            'top-stories', 'supreme-court', 'high-court', 'news-updates', 'consumer-cases', 'articles',
+            'lawschool', 'law-firms', 'round-ups'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith('/' + sec + '/')}):
+                url = absurl(a['href'].split('?')[0])
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                if not url[-1].isdigit():
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,8 +1,8 @@
+#!/usr/bin/env python
 """
 www.mainichi.jp/english
 """
 
-from calibre.ptempfile import PersistentTemporaryFile
 from calibre.web.feeds.news import BasicNewsRecipe
 
 
@@ -23,24 +23,26 @@ class MainichiEnglishNews(BasicNewsRecipe):
     remove_javascript = True
     auto_cleanup = True
 
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
-    feeds = [
-        ('Articles', 'https://news.google.com/rss/search?q=when:48h+allinurl:mainichi.jp%2Fenglish%2Farticles%2F&hl=en-US&gl=US&ceid=US:en')
-    ]
+    def parse_index(self):
+        index = 'https://mainichi.jp/english/'
+        sections = [
+            'articles'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + sec + '/')}):
+                url = a['href']
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,5 +1,4 @@
-from urllib.parse import quote
-
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -26,21 +25,6 @@ class MoneyControlRecipe(BasicNewsRecipe):
         .article_desc { font-style:italic; color:#202020; }
     '''
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        soup = self.index_to_soup(url)
-        link = soup.a['href']
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/multimedia/',
-        ]
-        if any(x in link for x in skip_sections):
-            self.abort_article('skipping video links ', link)
-        self.log('Found ', link)
-        html = br.open(link).read()
-        return ({ 'data': html, 'url': link })
-
     keep_only_tags = [
         dict(name='div', attrs={'id':lambda x: x and x.startswith('article-')})
     ]
@@ -65,41 +49,52 @@ class MoneyControlRecipe(BasicNewsRecipe):
                 img['src'] = img['data-src']
         return soup
 
-    feeds = []
-
-    when = oldest_article*24
-    index = 'https://www.moneycontrol.com/'
-
-    business_sections = [
-        'markets', 'stocks', 'ipo', 'budget', 'banks', 'moneycontrol-research', 'economy', 'earnings', 'real-estate',
-        'personal-finance', 'commodities', 'trade', 'companies'
-    ]
-
-    a = 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'
-
-    for sec in business_sections:
-        allinurl_a = index + 'news/business'
-        feeds.append((sec.capitalize(), a.format(when, quote(allinurl_a + sec, safe=''))))
-    feeds.append(('Business' , a.format(when, quote(allinurl_a, safe=''))))
-
-    news_sections = [
-        'india', 'world', 'opinion', 'politics', 'technology', 'trends', 'lifestyle'
-    ]
-
-    for sec in news_sections:
-        allinurl_b = index + 'news'
-        feeds.append((sec.capitalize(), a.format(when, quote(allinurl_b + sec, safe=''))))
-    feeds.append(('News', a.format(when, quote(allinurl_b, safe=''), '')))
-    feeds.append(
-        ('Others', 'https://news.google.com/rss/search?q=when:{}h+allinurl:{}&hl=en-IN&gl=IN&ceid=IN:en'.format(when, quote(index, safe='')))
-    )
+    def parse_index(self):
+        index = 'https://www.moneycontrol.com/'
+
+        business_sections = [
+            'markets', 'stocks', 'ipo', 'budget', 'banks', 'moneycontrol-research', 'economy', 'earnings', 'real-estate',
+            'personal-finance', 'commodities', 'trade', 'companies'
+        ]
+
+        news_sections = [
+            'india', 'world', 'opinion', 'politics', 'technology', 'trends', 'lifestyle'
+        ]
+
+        feeds = []
+        soup = self.index_to_soup(index)
+        for b_sec in business_sections:
+            burl = index + 'news/business/'
+            section = b_sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(burl + b_sec + '/')}):
+                url = a['href']
+                if url in {burl + b_sec + '/', burl + b_sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        for n_sec in news_sections:
+            nurl = index + 'news/'
+            nsection = n_sec.capitalize()
+            self.log(nsection)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(nurl + n_sec + '/')}):
+                url = a['href']
+                if url in {nurl + n_sec + '/', nurl + n_sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((nsection, articles))
+        return feeds
 
     def populate_article_metadata(self, article, soup, first):
-        div = soup.find('div', attrs={'data-io-article-url':True})
-        if div:
-            article.url = div['data-io-article-url']
         desc = soup.find(**classes('article_desc'))
         if desc:
             article.summary = self.tag_to_string(desc)
             article.text_summary = article.summary
-        article.title = article.title.replace(' - Moneycontrol', '')
@@ -1,4 +1,4 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -20,29 +20,6 @@ class newsminute(BasicNewsRecipe):
     resolve_internal_links = True
     remove_empty_feeds = True
     remove_attributes = ['style', 'height', 'width']
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/', 'podcast-'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
 
     keep_only_tags = [
         classes(
@@ -50,21 +27,29 @@ class newsminute(BasicNewsRecipe):
         ),
     ]
 
-    feeds = []
-
-    sections = [
-        'tamil-nadu', 'telangana', 'andhra-pradesh', 'karnataka', 'kerala'
-    ]
-
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fwww.thenewsminute.com{}&hl=en-IN&gl=IN&ceid=IN:en'
-        feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
-    feeds.append(('Others', a.format('')))
-
-    def populate_article_metadata(self, article, soup, first):
-        article.title = article.title.replace(' - The News Minute', '')
-
     def preprocess_html(self, soup):
         for img in soup.findAll('img', attrs={'data-src':True}):
             img['src'] = img['data-src']
         return soup
+
+    def parse_index(self):
+        index = 'https://www.thenewsminute.com/'
+        sections = [
+            'tamil-nadu', 'telangana', 'andhra-pradesh', 'karnataka', 'kerala'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + sec + '/')}):
+                url = a['href']
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -1,4 +1,4 @@
-from calibre.ptempfile import PersistentTemporaryFile
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -19,30 +19,6 @@ class scroll(BasicNewsRecipe):
     ignore_duplicate_articles = {'title', 'url'}
     remove_attributes = ['style', 'height', 'width']
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/announcements/'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
     extra_css = '''
         .orange-tag, .article-meta-container { font-size:small; }
         .featured-image, .cms-block-image { text-align:center; font-size:small; }
@@ -55,10 +31,28 @@ class scroll(BasicNewsRecipe):
 
     remove_tags = [classes('comments-entry-point-meta')]
 
-    feeds = [('Articles', 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fscroll.in&hl=en-IN&gl=IN&ceid=IN:en')]
+    def parse_index(self):
+        index = 'https://scroll.in/'
+        sections = [
+            'article', 'magazine'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + sec + '/')}):
+                url = a['href'].split('?')[0]
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
 
     def populate_article_metadata(self, article, soup, first):
-        # article.url = ''
-        article.summary = self.tag_to_string(soup.find('h2'))
-        article.text_summary = self.tag_to_string(soup.find('h2'))
-        article.title = article.title.replace(' - Scroll.in', '')
+        if soup.find('h2'):
+            article.summary = article.text_summary = self.tag_to_string(soup.find('h2'))
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 from calibre.web.feeds.news import BasicNewsRecipe, classes
 
 
@@ -11,7 +12,7 @@ class STHKRecipe(BasicNewsRecipe):
     masthead_url = 'https://std.stheadline.com/dist/images/logo-v2@2x.png'
     no_stylesheets = True
     remove_javascript = True
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
     remove_empty_feeds = True
     use_embedded_content = False
     remove_attributes = ['style', 'height', 'width']
@@ -31,34 +32,26 @@ class STHKRecipe(BasicNewsRecipe):
         classes('in-article-banner stick-box-gray article-pagination comments')
     ]
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)['href']
-        skip_sections = [ # add sections you want to skip
-            '/video/', '/videos/', '/media/', 'podcast'
-        ]
-        if any(x in link for x in skip_sections):
-            self.log('Aborting Article ', link)
-            self.abort_article('skipping video links')
-        html = br.open(link).read()
-        return ({ 'data': html, 'url': link })
-
-    feeds = [
-        ('日報', 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fstd.stheadline.com%2Fdaily%2F&hl=zh-HK&gl=HK&ceid=HK:zh'),
-        ('即時', 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fstd.stheadline.com%2Frealtime%2F&hl=zh-HK&gl=HK&ceid=HK:zh'),
-        ('副刊', 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fstd.stheadline.com%2Fsupplement%2F&hl=zh-HK&gl=HK&ceid=HK:zh'),
-        ('其他的 新聞', 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fstd.stheadline.com&hl=zh-HK&gl=HK&ceid=HK:zh')
-    ]
-
-    def populate_article_metadata(self, article, soup, first):
-        article.title = article.title.replace(' - 星島頭條', '')
-
-    def preprocess_raw_html(self, raw, *a):
-        return raw.replace('<p></p>', '')
+    def parse_index(self):
+        index = 'https://std.stheadline.com/'
+        sections = [
+            'daily', 'realtime', 'supplement'
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith(index + sec + '/')}):
+                url = a['href']
+                if url in {index + sec + '/', index + sec}:
+                    continue
+                if '/article/' not in url:
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
@@ -6,7 +6,6 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
 usatoday.com
 '''
 
-from calibre.ptempfile import PersistentTemporaryFile
 from calibre.web.feeds.news import BasicNewsRecipe
 
 
@@ -15,6 +14,9 @@ def classes(classes):
     return dict(attrs={
         'class': lambda x: x and frozenset(x.split()).intersection(q)})
 
+def absurl(url):
+    if url.startswith('/'):
+        return 'https://www.usatoday.com' + url
 
 class USAToday(BasicNewsRecipe):
 
@@ -60,44 +62,32 @@ class USAToday(BasicNewsRecipe):
     }
     '''
 
-    ignore_duplicate_articles = {'title'}
+    ignore_duplicate_articles = {'title', 'url'}
     resolve_internal_links = True
     remove_empty_feeds = True
 
-    articles_are_obfuscated = True
-
-    def get_obfuscated_article(self, url):
-        br = self.get_browser()
-        try:
-            br.open(url)
-        except Exception as e:
-            url = e.hdrs.get('location')
-        soup = self.index_to_soup(url)
-        link = soup.find('a', href=True)
-        skip_sections =[ # add sections you want to skip
-            '/video/', '/videos/', '/media/', 'podcast-'
-        ]
-        if any(x in link['href'] for x in skip_sections):
-            self.log('Aborting Article ', link['href'])
-            self.abort_article('skipping video links')
-
-        self.log('Downloading ', link['href'])
-        html = br.open(link['href']).read()
-        pt = PersistentTemporaryFile('.html')
-        pt.write(html)
-        pt.close()
-        return pt.name
-
-    feeds = []
-
-    sections = [
-        'news', 'nation', 'politics', 'opinion', 'tech', 'entertainment', 'money', 'sports', 'travel', 'life', 'investigations',
-    ]
-
-    for sec in sections:
-        a = 'https://news.google.com/rss/search?q=when:27h+allinurl:https%3A%2F%2Fwww.usatoday.com%2Fstory{}&hl=en-US&gl=US&ceid=US:en'
-        feeds.append((sec.capitalize(), a.format('%2F' + sec + '%2F')))
-    # feeds.append(('Others', a.format('')))
+    def parse_index(self):
+        index = 'https://www.usatoday.com/'
+        sections = [
+            'news', 'opinion', 'tech', 'entertainment', 'money', 'sports', 'travel', 'life', 'investigations',
+        ]
+        feeds = []
+        soup = self.index_to_soup(index)
+        for sec in sections:
+            section = sec.capitalize()
+            self.log(section)
+            articles = []
+            for a in soup.findAll('a', attrs={'href':lambda x: x and x.startswith('/story/' + sec + '/')}):
+                url = absurl(a['href'].split('?')[0])
+                if url == index + '/story/' + sec + '/':
+                    continue
+                title = self.tag_to_string(a)
+                self.log('\t', title, '\n\t\t', url)
+                articles.append({'title': title, 'url': url})
+            if articles:
+                feeds.append((section, articles))
+        return feeds
 
     def preprocess_html(self, soup):
         for img in soup.findAll('img', src=True):
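Any of the recipes changed above can be smoke-tested locally with calibre's standard recipe workflow (not part of this commit): ebook-convert some.recipe out.epub --test -vv fetches only a couple of articles per feed and logs the URLs that the new parse_index methods discover.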