Update google_news.recipe
Repository: https://github.com/kovidgoyal/calibre.git (mirror)
Commit: 3865197e1b, parent: 0c6f9949cd
@@ -2,6 +2,7 @@
 # vim:fileencoding=utf-8
 from __future__ import unicode_literals, division, absolute_import, print_function
 from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ptempfile import PersistentTemporaryFile
 import json
 
 # a serarch topic, filled into the string below. You can change that to anything google news should be searched for...
@@ -16,9 +17,9 @@ class google_news_de(BasicNewsRecipe):
     title = 'Google News'
     cover_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Google_News_icon.svg/500px-Google_News_icon.svg.png'
     # Author
-    __author__ = 'Volker Heggemann, VoHe'
+    __author__ = 'Volker Heggemann, VoHe, unkn0wn'
     # oldest article to download (in days) ---- can be edit by user
-    oldest_article = 2
+    oldest_article = 1.25
     # describes itself, ---- can be edit by user
     max_articles_per_feed = 200
     # speed up the download on fast computers be careful (I test max.20)
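For context on the oldest_article change above: calibre interprets this setting in days, so the new value of 1.25 keeps roughly the last 30 hours of feed items instead of the previous 48. A quick arithmetic check (illustrative only, not part of the recipe):

# Not recipe code: oldest_article is measured in days,
# so 1.25 days corresponds to 30 hours.
from datetime import timedelta

print(timedelta(days=1.25).total_seconds() / 3600)  # -> 30.0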
@@ -36,6 +37,30 @@ class google_news_de(BasicNewsRecipe):
 
     # remove the rubbish (in ebook)
     auto_cleanup = True
 
+    articles_are_obfuscated = True
+
+    def get_obfuscated_article(self, url):
+        br = self.get_browser()
+        try:
+            br.open(url)
+        except Exception as e:
+            url = e.hdrs.get('location')
+        soup = self.index_to_soup(url)
+        link = soup.find('a', href=True)
+        skip_sections =[ # add sections you want to skip
+            '/video/', '/videos/', '/media/', 'podcast-'
+        ]
+        if any(x in link['href'] for x in skip_sections):
+            self.log('Aborting Article ', link['href'])
+            self.abort_article('skipping video links')
+
+        self.log('Found link: ', link['href'])
+        html = br.open(link['href']).read()
+        pt = PersistentTemporaryFile('.html')
+        pt.write(html)
+        pt.close()
+        return pt.name
+
     # now the content description and URL follows
     # feel free to add, wipe out what you need ---- can be edit by user
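Background on the hook added in this hunk: when a recipe sets articles_are_obfuscated = True, calibre calls get_obfuscated_article(url) for every feed item and then parses the local file path the method returns instead of fetching the article URL directly. This is why the new code follows the Google News redirect, saves the publisher HTML with PersistentTemporaryFile, and returns pt.name. A minimal sketch of that contract follows; the class name and title are placeholders for illustration, not part of the commit above.

# Minimal sketch of calibre's obfuscated-article contract, assuming only the
# documented BasicNewsRecipe behaviour. Class name and title are placeholders.
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ptempfile import PersistentTemporaryFile


class ObfuscatedDemo(BasicNewsRecipe):
    title = 'Obfuscated demo'          # placeholder
    articles_are_obfuscated = True     # tells calibre to call the hook below

    def get_obfuscated_article(self, url):
        # Fetch whatever the feed links to (often a redirector page),
        # write the HTML to a persistent temp file, and return the path;
        # calibre then processes that local file as the article.
        raw = self.get_browser().open(url).read()
        pt = PersistentTemporaryFile('.html')
        pt.write(raw)
        pt.close()
        return pt.name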