Fix typos in recipes/

This commit is contained in:
luz paz 2022-07-18 22:05:00 -04:00
parent f0308f23a9
commit 6eb95a700c
46 changed files with 68 additions and 68 deletions

View File

@@ -47,7 +47,7 @@ class AdvancedUserRecipe1282101454(BasicNewsRecipe):
masthead_url = 'http://gawand.org/wp-content/uploads/2010/06/ajc-logo.gif'
# Pick your poison. Business seems to be mostly cross-linked articles. Premium and cross-linked
-# articels will be dropped.
+# articles will be dropped.
feeds = [
('Breaking News', 'http://www.ajc.com/list/rss/online/ajc-auto-list-iphone-topnews/aFKq/'),
('Metro and Georgia',

View File

@@ -234,8 +234,8 @@ class BBCNews(BasicNewsRecipe):
#
oldest_article = 1.5
-# Number of simultaneous downloads. 20 is consistantly working fine on the
-# BBC News feeds with no problems. Speeds things up from the defualt of 5.
+# Number of simultaneous downloads. 20 is consistently working fine on the
+# BBC News feeds with no problems. Speeds things up from the default of 5.
# If you have a lot of feeds and/or have increased oldest_article above 2
# then you may wish to try increasing simultaneous_downloads to 25-30,
# Or, of course, if you are in a hurry. [I've not tried beyond 20.]
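
A sketch of how the two settings discussed above fit together in a recipe; the class name and feed URL are placeholders, not part of this commit:

from calibre.web.feeds.news import BasicNewsRecipe

class TunedDownloads(BasicNewsRecipe):
    # hypothetical example illustrating the tuning notes above
    title = 'Tuning Example'
    # articles older than this many days are skipped
    oldest_article = 1.5
    # 20 parallel fetches instead of calibre's default of 5
    simultaneous_downloads = 20
    feeds = [('Top Stories', 'https://example.com/rss')]  # placeholder feed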

View File

@@ -42,7 +42,7 @@ class BBCBrasilRecipe(BasicNewsRecipe):
#
# There are 68 feeds below which constitute the bulk of the available rss
# feeds on the BBC web site. These include 5 blogs by editors and
-# correspondants, 16 sports feeds, 15 'sub' regional feeds (Eg. North West
+# correspondents, 16 sports feeds, 15 'sub' regional feeds (Eg. North West
# Wales, Scotland Business), and 7 Welsh language feeds.
#
# Some of the feeds are low volume (Eg. blogs), or very low volume (Eg. Click)
@@ -108,8 +108,8 @@ class BBCBrasilRecipe(BasicNewsRecipe):
#
oldest_article = 1.5
-# Number of simultaneous downloads. 20 is consistantly working fine on the
-# BBC News feeds with no problems. Speeds things up from the defualt of 5.
+# Number of simultaneous downloads. 20 is consistently working fine on the
+# BBC News feeds with no problems. Speeds things up from the default of 5.
# If you have a lot of feeds and/or have increased oldest_article above 2
# then you may wish to try increasing simultaneous_downloads to 25-30,
# Or, of course, if you are in a hurry. [I've not tried beyond 20.]
@@ -383,7 +383,7 @@ class BBCBrasilRecipe(BasicNewsRecipe):
skip_reg_exp = '^.*skip.*$'
# Extra things to remove due to the addition of 'storycontent' in keep_only_tags,
-# which are the alterative table design based pages. The purpose of some of these
+# which are the alternative table design based pages. The purpose of some of these
# is not entirely clear from the pages (which are a total mess!).
# Remove mapping based tags, Eg. <map id="world_map">

View File

@@ -7,17 +7,17 @@ from calibre.web.feeds.news import BasicNewsRecipe
class germanyBSI(BasicNewsRecipe):
-# Titel of the Recipe
+# Title of the Recipe
# title = 'News des Bundesamt für Sicherheit in der Informationstechnik'
title = 'BSI News - DE'
cover_url = 'https://www.bsi.bund.de/SiteGlobals/Frontend/Images/BSI/logo.png'
# Author
__author__ = 'Volker Heggemann, VoHe'
-# oldes article to download (in days) ---- can be edit by user
+# oldest article to download (in days) ---- can be edit by user
oldest_article = 7
# describes itself, ---- can be edit by user
max_articles_per_feed = 100
-# speed up the download on fast computers be carefull (I test max.20)
+# speed up the download on fast computers be careful (I test max.20)
# ---- can be edit by user
simultaneous_downloads = 10
# description, some Reader show this in titlepage

View File

@@ -63,7 +63,7 @@ class CaravanMagazine(BasicNewsRecipe):
raise ValueError('Login failed, check your username and password')
return br
-# To parse artice toc
+# To parse article toc
def parse_index(self):
base_url = 'https://www.caravanmagazine.in/'
soup = self.index_to_soup('{0}magazine'.format(base_url))
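
For context on the parse_index() methods touched in this commit, a minimal sketch of the calibre convention; the URL and markup selectors here are assumptions, not CaravanMagazine's actual code:

from calibre.web.feeds.news import BasicNewsRecipe

class TocSketch(BasicNewsRecipe):
    title = 'TOC Sketch'

    def parse_index(self):
        soup = self.index_to_soup('https://example.com/magazine')  # placeholder URL
        articles = []
        for link in soup.findAll('a', attrs={'class': 'article'}):  # assumed markup
            articles.append({'title': self.tag_to_string(link), 'url': link['href']})
        # parse_index() returns a list of (section_title, articles) tuples,
        # where each article is a dict with at least 'title' and 'url'
        return [('Current Issue', articles)]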

View File

@@ -68,7 +68,7 @@ class CaravanMagazineHindi(BasicNewsRecipe):
raise ValueError('Login failed, check your username and password')
return br
-# To parse artice toc
+# To parse article toc
def parse_index(self):
base_url = 'https://www.caravanmagazine.in/'
soup = self.index_to_soup('{0}magazine'.format(base_url))

View File

@@ -7,7 +7,7 @@ class AdvancedUserRecipe1278162597(BasicNewsRecipe):
oldest_article = 7
max_articles_per_feed = 100
-pubisher = 'www.ce.cn - China Economic net - Beijing'
+publisher = 'www.ce.cn - China Economic net - Beijing'
description = 'China Economic Net Magazine'
category = 'Economic News Magazine, Chinese, China'
feeds = [

View File

@@ -17,7 +17,7 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
publisher = u'Grupo Prisa'
__author__ = 'Luis Hernandez'
-description = 'spanish web about money and bussiness, free edition'
+description = 'spanish web about money and business, free edition'
cover_url = 'http://www.prisa.com/images/logos/logo_cinco_dias.gif'
oldest_article = 2

View File

@@ -90,7 +90,7 @@ class Clarin(BasicNewsRecipe):
remove_attributes = ['lang']
-# Images on hightlights view
+# Images on highlights view
def populate_article_metadata(self, article, soup, first):
if first and hasattr(self, 'add_toc_thumbnail'):
picdiv = soup.find('img')
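
A plausible completion of the thumbnail pattern above, for readers unfamiliar with it (a sketch, not Clarin's actual code beyond the lines shown):

def populate_article_metadata(self, article, soup, first):
    # only the first page of an article is considered for the TOC thumbnail
    if first and hasattr(self, 'add_toc_thumbnail'):
        picdiv = soup.find('img')
        if picdiv is not None:
            # register the article's first image as its thumbnail
            self.add_toc_thumbnail(article, picdiv['src'])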

View File

@@ -4,7 +4,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class DaytonBeachNewsJournal(BasicNewsRecipe):
title = 'Daytona Beach News Journal'
__author__ = 'BRGriff'
-pubisher = 'News-JournalOnline.com'
+publisher = 'News-JournalOnline.com'
description = 'Daytona Beach, Florida, Newspaper'
category = 'News, Daytona Beach, Florida'
oldest_article = 1

View File

@@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
# 1: Base Version
# 2: Added rules for wdr.de, ndr.de, br-online.de
# 3: Added rules for rbb-online.de, boerse.ard.de, sportschau.de
-# 4: New design of tagesschau.de implemented. Simplyfied.
+# 4: New design of tagesschau.de implemented. Simplified.
# 5: Taken out the pictures.

View File

@@ -105,8 +105,8 @@ class DunyaHalleri(BasicNewsRecipe):
p.append(img)
div.replaceWith(p)
-# youtube embeded normalization
-# this block finds the cover image for each embeded youtube video then
+# youtube embedded normalization
+# this block finds the cover image for each embedded youtube video then
# changes it to "a href" and "img"
for iframe in soup.findAll('iframe'):
a = new_tag(soup, 'a')
@@ -124,7 +124,7 @@ class DunyaHalleri(BasicNewsRecipe):
a_href = 'https://www.youtube.com/watch?v=' + m.group('vid')
else:
# not youtube
-# default cover image for non-youtube embeded pages
+# default cover image for non-youtube embedded pages
img_src = 'http://www.warnerclassics.com/img_style/default_video_m.jpg'
a_href = iframe['src']
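
The normalization these comments describe amounts to swapping each iframe for a linked thumbnail image. A self-contained sketch of the technique; the helper name and YouTube thumbnail URL pattern are assumptions, not this recipe's exact code:

import re

def iframe_to_linked_image(soup, new_tag):
    # new_tag is the recipe's tag-construction helper, as used above
    for iframe in soup.findAll('iframe'):
        m = re.search(r'youtube\.com/embed/(?P<vid>[\w-]+)', iframe.get('src', ''))
        if m:
            # cover image for the embedded youtube video
            img_src = 'https://img.youtube.com/vi/%s/0.jpg' % m.group('vid')
            a_href = 'https://www.youtube.com/watch?v=' + m.group('vid')
        else:
            # default cover image for non-youtube embedded pages
            img_src = 'http://www.warnerclassics.com/img_style/default_video_m.jpg'
            a_href = iframe['src']
        a = new_tag(soup, 'a')
        a['href'] = a_href
        img = new_tag(soup, 'img')
        img['src'] = img_src
        a.append(img)
        iframe.replaceWith(a)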

View File

@@ -170,8 +170,8 @@ class DunyaHalleri_HaftaninOzeti(BasicNewsRecipe):
p.append(img)
div.replaceWith(p)
-# youtube embeded normalization
-# this block finds the cover image for each embeded youtube video then
+# youtube embedded normalization
+# this block finds the cover image for each embedded youtube video then
# changes it to "a href" and "img"
for iframe in soup.findAll('iframe'):
a = new_tag(soup, 'a')
@@ -189,7 +189,7 @@ class DunyaHalleri_HaftaninOzeti(BasicNewsRecipe):
a_href = 'https://www.youtube.com/watch?v=' + m.group('vid')
else:
# not youtube
-# default cover image for non-youtube embeded pages
+# default cover image for non-youtube embedded pages
img_src = 'http://www.warnerclassics.com/img_style/default_video_m.jpg'
a_href = iframe['src']

View File

@@ -162,6 +162,6 @@ class eenadu(BasicNewsRecipe):
today = datetime.now()
if (today - date) > timedelta(1.5):
self.abort_article('Skipping old article')
-else: # may not be an artilce.
+else: # may not be an article.
self.abort_article()
return soup

View File

@@ -21,7 +21,7 @@ class NYTimes(BasicNewsRecipe):
def nejm_get_index(self):
return self.index_to_soup('http://www.focus.pl/')
-# To parse artice toc
+# To parse article toc
def parse_index(self):
soup = self.nejm_get_index()

View File

@@ -64,7 +64,7 @@ class gazetaprawna(BasicNewsRecipe):
]
def parse_feeds(self):
-self.log(_('Gazeta Prawna overrided parse_feeds()'))
+self.log(_('Gazeta Prawna overrode parse_feeds()'))
parsed_feeds = BasicNewsRecipe.parse_feeds(self)
for n, feed in enumerate(parsed_feeds):
for a, article in enumerate(feed):
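
For reference, the general shape of such a parse_feeds() override in calibre; the URL rewrite below is an assumed example, not Gazeta Prawna's actual logic:

from calibre.web.feeds.news import BasicNewsRecipe

class FeedRewriteSketch(BasicNewsRecipe):
    title = 'Feed Rewrite Sketch'
    feeds = [('News', 'https://example.com/rss')]  # placeholder feed

    def parse_feeds(self):
        # let the base class fetch and parse all feeds first
        parsed_feeds = BasicNewsRecipe.parse_feeds(self)
        for feed in parsed_feeds:
            for article in feed:
                # post-process each article, e.g. point it at a print-friendly page
                article.url = article.url.replace('/artykuly/', '/drukowanie/')  # assumed rewrite
        return parsed_feeds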

View File

@@ -13,16 +13,16 @@ terms_to_search_for = (
class google_news_de(BasicNewsRecipe):
-# Titel of the Recipe - this is a sample
+# Title of the Recipe - this is a sample
title = 'Google News'
cover_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Google_News_icon.svg/500px-Google_News_icon.svg.png'
# Author
__author__ = 'Volker Heggemann, VoHe'
-# oldes article to download (in days) ---- can be edit by user
+# oldest article to download (in days) ---- can be edit by user
oldest_article = 2
# describes itself, ---- can be edit by user
max_articles_per_feed = 200
-# speed up the download on fast computers be carefull (I test max.20)
+# speed up the download on fast computers be careful (I test max.20)
# ---- can be edit by user
simultaneous_downloads = 10
# description, some Reader show this in titlepage
@@ -64,7 +64,7 @@ class google_news_de(BasicNewsRecipe):
)
for searchfor in terms_to_search_for:
self.feeds.append(
-('Google news intrested in ' + searchfor,
+('Google news interested in ' + searchfor,
'https://news.google.com/news?cf=all&hl=' + country_code +
'+&pz=1&ned=' + country_code + '&q=' + searchfor + '&output=rss'))
return BasicNewsRecipe.get_feeds(self)

View File

@@ -19,7 +19,7 @@ class AdvancedUserRecipe1277305250(BasicNewsRecipe):
__author__ = 'rty'
__version__ = '1.0'
language = 'zh'
-pubisher = 'http://www.infzm.com'
+publisher = 'http://www.infzm.com'
description = 'Chinese Weekly Tabloid'
category = 'News, China'
remove_javascript = True

View File

@@ -15,7 +15,7 @@ class JakartaGlobe(BasicNewsRecipe):
(u'Sports', u'http://www.thejakartaglobe.com/sports/feed/'),
]
__author__ = 'rty'
-pubisher = 'JakartaGlobe.com'
+publisher = 'JakartaGlobe.com'
description = 'JakartaGlobe, Indonesia, Newspaper'
category = 'News, Indonesia'

View File

@@ -31,7 +31,7 @@ class JournalofHospitalMedicine(BasicNewsRecipe):
def johm_get_index(self):
return self.index_to_soup('https://shmpublications.onlinelibrary.wiley.com/toc/15535606/current')
-# To parse artice toc
+# To parse article toc
def parse_index(self):
soup = self.johm_get_index()
toc = soup.find(id='issueTocGroups')

View File

@@ -59,7 +59,7 @@ class JerusalemPost(BasicNewsRecipe):
# ------------------------------------------
return print_url
-# example of how links should be formated
+# example of how links should be formatted
# -------------------------------------------------------------------------
# org version = http://fr.jpost.com/servlet/Satellite?pagename=JFrench/JPArticle/ShowFull&cid=1282804806075
# print version = http://fr.jpost.com/servlet/Satellite?cid=1282804806075&pagename=JFrench%2FJPArticle%2FPrinter

View File

@@ -49,7 +49,7 @@ class KANewsRecipe(BasicNewsRecipe):
dict(name=['span'], attrs={'class': 'comm_info'}),
dict(name=['h3'], attrs={'id': 'artdetail_unterzeile'})]
-# removing style attribute _after_ removing specifig tags above
+# removing style attribute _after_ removing specific tags above
remove_attributes = ['width', 'height', 'style']
extra_css = '''

View File

@@ -50,7 +50,7 @@ class JASN(BasicNewsRecipe):
def jasn_get_index(self):
return self.index_to_soup('http://jasn.asnjournals.org/current.shtml')
-# To parse artice toc
+# To parse article toc
def parse_index(self):
parse_soup = self.jasn_get_index()

View File

@@ -6,13 +6,13 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1591780224(BasicNewsRecipe):
title = 'Linux News'
-# Titel of the Recipe
+# Title of the Recipe
cover_url = 'https://linuxnews.de/wp-content/themes/linuxnews/images/linuxnews-logo.png'
# Author
__author__ = 'Volker Heggemann, VoHe'
-# oldes article to download (in days)
+# oldest article to download (in days)
oldest_article = 4
-# speed up the download on fast computers be carefull (I test max.20)
+# speed up the download on fast computers be careful (I test max.20)
# ---- can be edit by user
simultaneous_downloads = 10
# description, some Reader show this in titlepage

View File

@@ -7,7 +7,7 @@ class LondonFreePress(BasicNewsRecipe):
oldest_article = 4
max_articles_per_feed = 100
-pubisher = 'lfpress.com'
+publisher = 'lfpress.com'
description = 'Ontario Canada Newspaper'
category = 'News, Ontario, Canada'
remove_javascript = True

View File

@@ -5,7 +5,7 @@
# 1) Summary of the article are noow available
# 2) Additional sections International, France, Economie and Culture have
# been added through custom entries in the function my_parse_index.
-# 3) Fix the cover image so it doesnt disappear from the Kindle menu
+# 3) Fix the cover image so it doesn't disappear from the Kindle menu
# ( cover image format is changed to .jpeg)
# 14 Jan 2021 - Add Mediapart Logo url as masthead_url and change cover
# by overlaying the date on top of the Mediapart cover
@@ -69,7 +69,7 @@ class Mediapart(BasicNewsRecipe):
# The feed at 'http://www.mediapart.fr/articles/feed' only displayed the 10
# last elements so the articles are indexed on specific pages
# in the function my_parse_index. In this function the article are parsed
-# using the funtion get_articles and the dict values dict_article_sources
+# using the function get_articles and the dict values dict_article_sources
def parse_feeds(self):
feeds = super(Mediapart, self).parse_feeds()

View File

@@ -33,7 +33,7 @@ from calibre.utils.magick import Image
Changed a lot of custom code into calibre code as the default code of calibre has become much faster since the first version fo this recipe
Added new feeds
Updated css
-Changed order of regex to speedup proces
+Changed order of regex to speedup process
Version 1.9.3 23-05-2012
Updated Cover image
Version 1.9.4 19-04-2013

View File

@@ -6,17 +6,17 @@ from datetime import datetime
class MyDealzDE(BasicNewsRecipe):
-# Titel of the Recipe
+# Title of the Recipe
title = 'MyDealz'
# Author
__author__ = 'Volker Heggemann, VoHe'
-# oldes article to download (in days) ---- can be edit by user
+# oldest article to download (in days) ---- can be edit by user
oldest_article = 5
# describes itself, ---- can be edit by user
max_articles_per_feed = 100
# Cover Picture
cover_url = 'https://pbs.twimg.com/profile_images/817053687545741313/0wFqvfqC_400x400.jpg'
-# speed up the download on fast computers be carefull (I test max.20)
+# speed up the download on fast computers be careful (I test max.20)
# ---- can be edit by user
simultaneous_downloads = 10
# description, some Reader show this in titlepage

View File

@@ -4,7 +4,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class NBOnline(BasicNewsRecipe):
title = u'Nikkei Business Online'
language = 'ja'
-description = u'Nikkei Business Online.\u6CE8\uFF1A\u30E6\u30FC\u30B6\u30FC\u540D\u306Bemail\u30A2\u30C9\u30EC\u30B9\u3068\u30E6\u30FC\u30B6\u30FC\u540D\u3092\u30BB\u30DF\u30B3\u30ED\u30F3\u3067\u533A\u5207\u3063\u3066\u5165\u308C\u3066\u304F\u3060\u3055\u3044\u3002\u4F8B\uFF1Aemail@address.jp;username . PLEASE NOTE: You need to put your email address and username into username filed separeted by ; (semi-colon).' # noqa
+description = u'Nikkei Business Online.\u6CE8\uFF1A\u30E6\u30FC\u30B6\u30FC\u540D\u306Bemail\u30A2\u30C9\u30EC\u30B9\u3068\u30E6\u30FC\u30B6\u30FC\u540D\u3092\u30BB\u30DF\u30B3\u30ED\u30F3\u3067\u533A\u5207\u3063\u3066\u5165\u308C\u3066\u304F\u3060\u3055\u3044\u3002\u4F8B\uFF1Aemail@address.jp;username . PLEASE NOTE: You need to put your email address and username into username field separated by ; (semi-colon).' # noqa
__author__ = 'Ado Nishimura'
needs_subscription = True
oldest_article = 7

View File

@@ -41,7 +41,7 @@ class NEJM(BasicNewsRecipe):
def nejm_get_index(self):
return self.index_to_soup('https://www.nejm.org/toc/nejm/medical-journal')
-# To parse artice toc
+# To parse article toc
def parse_index(self):
soup = self.nejm_get_index()
feeds = []

View File

@@ -324,7 +324,7 @@ class NewYorkTimes(BasicNewsRecipe):
return self.parse_web_sections()
return self.parse_todays_page()
-# The NYT occassionally returns bogus articles for some reason just in case
+# The NYT occasionally returns bogus articles for some reason just in case
# it is because of cookies, dont store cookies
def get_browser(self, *args, **kwargs):
return self

View File

@@ -324,7 +324,7 @@ class NewYorkTimes(BasicNewsRecipe):
return self.parse_web_sections()
return self.parse_todays_page()
-# The NYT occassionally returns bogus articles for some reason just in case
+# The NYT occasionally returns bogus articles for some reason just in case
# it is because of cookies, dont store cookies
def get_browser(self, *args, **kwargs):
return self

View File

@@ -22,7 +22,7 @@ class AdvancedUserRecipe1279258912(BasicNewsRecipe):
u'http://feeds.feedburner.com/orlandosentinel/features/lifestyle'),
]
__author__ = 'rty'
-pubisher = 'OrlandoSentinel.com'
+publisher = 'OrlandoSentinel.com'
description = 'Orlando, Florida, Newspaper'
category = 'News, Orlando, Florida'

View File

@@ -11,7 +11,7 @@ class AdvancedUserRecipe1277129332(BasicNewsRecipe):
max_articles_per_feed = 100
__author__ = 'ceapas'
-pubisher = 'people.com.cn'
+publisher = 'people.com.cn'
description = 'People Daily Newspaper'
language = 'zh'
category = 'News, China'

View File

@@ -6,15 +6,15 @@ from datetime import datetime
class PressePortalDE(BasicNewsRecipe):
-# Titel of the Recipe
+# Title of the Recipe
title = 'Presseportal DE'
# Author
__author__ = 'Volker Heggemann, VoHe'
-# oldes article to download (in days) ---- can be edit by user
+# oldest article to download (in days) ---- can be edit by user
oldest_article = 1
# describes itself, ---- can be edit by user
max_articles_per_feed = 100
-# speed up the download on fast computers be carefull (I test max.20)
+# speed up the download on fast computers be careful (I test max.20)
# ---- can be edit by user
simultaneous_downloads = 10
# description, some Reader show this in titlepage

View File

@@ -25,7 +25,7 @@ class TodaysZaman_en(BasicNewsRecipe):
# def preprocess_html(self, soup):
# return self.adeify_images(soup)
-# def print_version(self, url): #there is a probem caused by table format
+# def print_version(self, url): #there is a problem caused by table format
# return
# url.replace('http://www.todayszaman.com/newsDetail_getNewsById.action?load=detay&',
# 'http://www.todayszaman.com/newsDetail_openPrintPage.action?')

View File

@@ -15,16 +15,16 @@ feel free to modify this to your own needs
class Spiegel_DE_all(BasicNewsRecipe):
-# Titel of the Recipe
+# Title of the Recipe
title = u'Spiegel Online RSS - German alle Themen'
# Author
__author__ = u'Volker Heggemann, VoHe'
-# oldes article to download (in days) ---- can be edit by user
+# oldest article to download (in days) ---- can be edit by user
# be careful, if there is a lot of news, the file size exceeds!
oldest_article = 7
# describes itself, ---- can be edit by user
max_articles_per_feed = 100
-# speed up the download on fast computers be carefull (I test max.20)
+# speed up the download on fast computers be careful (I test max.20)
# ---- can be edit by user
simultaneous_downloads = 10
# description, some Reader show this in titlepage

View File

@@ -24,7 +24,7 @@ class T_Online_Recipe(BasicNewsRecipe):
masthead_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/31/T-online.de.svg/1280px-T-online.de.svg.png'
# all possible feeds look at:
# https://www.t-online.de/themen/rss-feed/
-# there are tons of feeds, may you just type the words of intrest after the http://feeds.t-online.de/rss/ link.
+# there are tons of feeds, just type the words of interest after the http://feeds.t-online.de/rss/ link.
feeds = [
('Germany', 'http://feeds.t-online.de/rss/deutschland'),
('NEWS', 'http://feeds.t-online.de/rss/nachrichten'),

View File

@@ -4,7 +4,7 @@ import re
# 1: Base Version
# 2: Added rules for wdr.de, ndr.de, br-online.de
# 3: Added rules for rbb-online.de, boerse.ard.de, sportschau.de
-# 4: New design of tagesschau.de implemented. Simplyfied.
+# 4: New design of tagesschau.de implemented. Simplified.
# 5: Taken out the pictures.

View File

@@ -19,7 +19,7 @@ class TheBudgetFashionista(BasicNewsRecipe):
use_embedded_content = False
encoding = 'utf-8'
publisher = 'TBF GROUP, LLC.'
-category = 'news, fashion, comsetics, women'
+category = 'news, fashion, cosmetics, women'
lang = 'en-US'
language = 'en'
auto_cleanup = True

View File

@@ -4,7 +4,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1278773519(BasicNewsRecipe):
title = u'Waco Tribune Herald'
__author__ = 'rty'
-pubisher = 'A Robinson Media Company'
+publisher = 'A Robinson Media Company'
description = 'Waco, Texas, Newspaper'
category = 'News, Texas, Waco'
oldest_article = 7

View File

@@ -53,7 +53,7 @@ class CanWestPaper(BasicNewsRecipe):
dict(name='li', attrs={'class': 'print'}), dict(name='li', attrs={'class': 'share'}), dict(name='ul', attrs={'class': 'bullet'})]
def preprocess_html(self, soup):
-# delete iempty id attributes--they screw up the TOC for unknow reasons
+# delete iempty id attributes--they screw up the TOC for unknown reasons
divtags = soup.findAll('div', attrs={'id': ''})
if divtags:
for div in divtags:

View File

@@ -6,7 +6,7 @@ class AdvancedUserRecipe1277647803(BasicNewsRecipe):
__author__ = 'rty'
__version__ = '1.0'
oldest_article = 2
-pubisher = 'www.winnipegsun.com'
+publisher = 'www.winnipegsun.com'
description = 'Winnipeg Newspaper'
category = 'News, Winnipeg, Canada'
max_articles_per_feed = 100

View File

@@ -28,5 +28,5 @@ class Yagmur(BasicNewsRecipe):
(u'Yagmur', u'http://open.dapper.net/services/yagmur'),
]
-def print_version(self, url): # there is a probem caused by table format
+def print_version(self, url): # there is a problem caused by table format
return url.replace('http://www.yagmurdergisi.com.tr/konu_goster.php?konu_id=', 'http://www.yagmurdergisi.com.tr/yazformati.php?konu_id=')

View File

@@ -27,5 +27,5 @@ class YeniUmit(BasicNewsRecipe):
(u'Yeni Umit', u'http://open.dapper.net/services/yeniumit'),
]
-def print_version(self, url): # there is a probem caused by table format
+def print_version(self, url): # there is a problem caused by table format
return url.replace('http://www.yeniumit.com.tr/konular', 'http://www.yeniumit.com.tr/yazdir')

View File

@@ -95,7 +95,7 @@ class ZAOBAO(BasicNewsRecipe):
return soup
def parse_feeds(self):
-self.log(_('ZAOBAO overrided parse_feeds()'))
+self.log(_('ZAOBAO overrode parse_feeds()'))
parsed_feeds = BasicNewsRecipe.parse_feeds(self)
for id, obj in enumerate(self.INDEXES):