mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-07 10:14:46 -04:00)
Fix #7076 (The Times Online Empty Articles + Login/Password Fields Missing)
commit 09590d9942
parent bfc753f794
@@ -1,103 +1,106 @@
-#!/usr/bin/env python
-
-__license__   = 'GPL v3'
-__copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
-'''
-timesonline.co.uk
-'''
-
-import re
-
-from calibre.web.feeds.news import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import Tag
-
-class Timesonline(BasicNewsRecipe):
-    title                  = 'The Times Online'
-    __author__             = 'Darko Miletic and Sujata Raman'
-    description            = 'UK news'
-    publisher              = 'timesonline.co.uk'
-    category               = 'news, politics, UK'
-    oldest_article         = 2
-    max_articles_per_feed  = 100
-    no_stylesheets         = True
-    use_embedded_content   = False
-    simultaneous_downloads = 1
-    encoding               = 'ISO-8859-1'
-    remove_javascript      = True
-    language               = 'en_GB'
-    recursions             = 9
-    match_regexps          = [r'http://www.timesonline.co.uk/.*page=[2-9]']
-
-    preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
-
-    keep_only_tags = [
-        dict(name='div', attrs= {'id':['region-column1and2-layout2']}),
-        {'class' : ['subheading']},
-        dict(name='div', attrs= {'id':['dynamic-image-holder']}),
-        dict(name='div', attrs= {'class':['article-author']}),
-        dict(name='div', attrs= {'id':['related-article-links']}),
-    ]
-
-    remove_tags = [
-        dict(name=['embed','object','form','iframe']),
-        dict(name='span', attrs = {'class':'float-left padding-left-8 padding-top-2'}),
-        dict(name='div', attrs= {'id':['region-footer','region-column2-layout2','grid-column4','login-status','comment-sort-order']}),
-        dict(name='div', attrs= {'class': ['debate-quote-container','clear','your-comment','float-left related-attachements-container','float-left padding-bottom-5 padding-top-8','puff-top']}),
-        dict(name='span', attrs = {'id': ['comment-count']}),
-        dict(name='ul',attrs = {'id': 'read-all-comments'}),
-        dict(name='a', attrs = {'class':'reg-bold'}),
-    ]
-
-    extra_css = '''
-        .small{font-family :Arial,Helvetica,sans-serif; font-size:x-small;}
-        .byline{font-family :Arial,Helvetica,sans-serif; font-size:x-small; background:#F8F1D8;}
-        .color-666{font-family :Arial,Helvetica,sans-serif; font-size:x-small; color:#666666; }
-        h1{font-family:Georgia,Times New Roman,Times,serif;font-size:large; }
-        .color-999 {color:#999999;}
-        .x-small {font-size:x-small;}
-        #related-article-links{font-family :Arial,Helvetica,sans-serif; font-size:small;}
-        h2{color:#333333;font-family :Georgia,Times New Roman,Times,serif; font-size:small;}
-        p{font-family :Arial,Helvetica,sans-serif; font-size:small;}
-    '''
-
-    feeds = [
-        (u'Top stories from Times Online', u'http://www.timesonline.co.uk/tol/feeds/rss/topstories.xml'),
-        ('Latest Business News', 'http://www.timesonline.co.uk/tol/feeds/rss/business.xml'),
-        ('Economics', 'http://www.timesonline.co.uk/tol/feeds/rss/economics.xml'),
-        ('World News', 'http://www.timesonline.co.uk/tol/feeds/rss/worldnews.xml'),
-        ('UK News', 'http://www.timesonline.co.uk/tol/feeds/rss/uknews.xml'),
-        ('Travel News', 'http://www.timesonline.co.uk/tol/feeds/rss/travel.xml'),
-        ('Sports News', 'http://www.timesonline.co.uk/tol/feeds/rss/sport.xml'),
-        ('Film News', 'http://www.timesonline.co.uk/tol/feeds/rss/film.xml'),
-        ('Tech news', 'http://www.timesonline.co.uk/tol/feeds/rss/tech.xml'),
-        ('Literary Supplement', 'http://www.timesonline.co.uk/tol/feeds/rss/thetls.xml'),
-    ]
-
-    def get_cover_url(self):
-        cover_url = None
-        index = 'http://www.timesonline.co.uk/tol/newspapers/'
-        soup = self.index_to_soup(index)
-        link_item = soup.find(name = 'div',attrs ={'class': "float-left margin-right-15"})
-        if link_item:
-            cover_url = link_item.img['src']
-        return cover_url
-
-    def get_article_url(self, article):
-        return article.get('guid', None)
-
-    def preprocess_html(self, soup):
-        soup.html['xml:lang'] = self.language
-        soup.html['lang']     = self.language
-        mlang = Tag(soup,'meta',[("http-equiv","Content-Language"),("content",self.language)])
-        mcharset = Tag(soup,'meta',[("http-equiv","Content-Type"),("content","text/html; charset=ISO-8859-1")])
-        soup.head.insert(0,mlang)
-        soup.head.insert(1,mcharset)
-        return self.adeify_images(soup)
-
-    def postprocess_html(self,soup,first):
-        for tag in soup.findAll(text = ['Previous Page','Next Page']):
-            tag.extract()
-        return soup
+__license__   = 'GPL v3'
+__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
+'''
+www.thetimes.co.uk
+'''
+
+import urllib
+
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class TimesOnline(BasicNewsRecipe):
+    title                 = 'The Times UK'
+    __author__            = 'Darko Miletic'
+    description           = 'news from United Kingdom and World'
+    language              = 'en_GB'
+    publisher             = 'Times Newspapers Ltd'
+    category              = 'news, politics, UK'
+    oldest_article        = 3
+    max_articles_per_feed = 100
+    no_stylesheets        = True
+    use_embedded_content  = False
+    encoding              = 'utf-8'
+    delay                 = 1
+    needs_subscription    = True
+    publication_type      = 'newspaper'
+    masthead_url          = 'http://www.thetimes.co.uk/tto/public/img/the_times_460.gif'
+    INDEX                 = 'http://www.thetimes.co.uk'
+    PREFIX                = u'http://www.thetimes.co.uk/tto/'
+    extra_css             = """
+        .f-ha{font-size: xx-large; font-weight: bold}
+        .f-author{font-family: Arial,Helvetica,sans-serif}
+        .caption{font-size: small}
+        body{font-family: Georgia,"Times New Roman",Times,serif}
+    """
+
+    conversion_options = {
+          'comment'   : description
+        , 'tags'      : category
+        , 'publisher' : publisher
+        , 'language'  : language
+    }
+
+    def get_browser(self):
+        br = BasicNewsRecipe.get_browser()
+        br.open('http://www.timesplus.co.uk/tto/news/?login=false&url=http://www.thetimes.co.uk/tto/news/?lightbox=false')
+        if self.username is not None and self.password is not None:
+            data = urllib.urlencode({ 'userName':self.username
+                                     ,'password':self.password
+                                     ,'keepMeLoggedIn':'false'
+                                    })
+            br.open('https://www.timesplus.co.uk/iam/app/authenticate',data)
+        return br
+
+    remove_tags = [
+         dict(name=['object','link','iframe','base','meta'])
+        ,dict(attrs={'class':'tto-counter' })
+    ]
+    remove_attributes=['lang']
+    keep_only_tags = [
+         dict(attrs={'class':'heading' })
+        ,dict(attrs={'class':'f-author'})
+        ,dict(attrs={'id':'bodycopy'})
+    ]
+
+    feeds = [
+         (u'UK News'    , PREFIX + u'news/uk/?view=list'      )
+        ,(u'World'      , PREFIX + u'news/world/?view=list'   )
+        ,(u'Politics'   , PREFIX + u'news/politics/?view=list')
+        ,(u'Health'     , PREFIX + u'health/news/?view=list'  )
+        ,(u'Education'  , PREFIX + u'education/?view=list'    )
+        ,(u'Technology' , PREFIX + u'technology/?view=list'   )
+        ,(u'Science'    , PREFIX + u'science/?view=list'      )
+        ,(u'Environment', PREFIX + u'environment/?view=list'  )
+        ,(u'Faith'      , PREFIX + u'faith/?view=list'        )
+        ,(u'Opinion'    , PREFIX + u'opinion/?view=list'      )
+        ,(u'Sport'      , PREFIX + u'sport/?view=list'        )
+        ,(u'Business'   , PREFIX + u'business/?view=list'     )
+        ,(u'Money'      , PREFIX + u'money/?view=list'        )
+        ,(u'Life'       , PREFIX + u'life/?view=list'         )
+        ,(u'Arts'       , PREFIX + u'arts/?view=list'         )
+    ]
+
+    def preprocess_html(self, soup):
+        for item in soup.findAll(style=True):
+            del item['style']
+        return self.adeify_images(soup)
+
+    def parse_index(self):
+        totalfeeds = []
+        lfeeds = self.get_feeds()
+        for feedobj in lfeeds:
+            feedtitle, feedurl = feedobj
+            self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
+            articles = []
+            soup = self.index_to_soup(feedurl)
+            for item in soup.findAll('td', attrs={'class':'title'}):
+                atag  = item.find('a')
+                url   = self.INDEX + atag['href']
+                title = self.tag_to_string(atag)
+                articles.append({
+                     'title'      :title
+                    ,'date'       :''
+                    ,'url'        :url
+                    ,'description':''
+                })
+            totalfeeds.append((feedtitle, articles))
+        return totalfeeds
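The part of the rewrite that addresses the "Login/Password Fields Missing" half of the bug is the new get_browser() override: instead of relying on a login form on the old timesonline.co.uk pages, the recipe now primes a session against timesplus.co.uk and POSTs the subscriber credentials to the Times+ authenticate endpoint. The sketch below is an illustration only, not part of the commit: it replays that flow as a standalone Python 2 script, assuming the mechanize package is available (calibre bundles its own copy), and reuses the URLs and form field names exactly as they appear in the recipe above.

    # Standalone sketch (not part of the commit): exercise the same Times+ login
    # flow that the new get_browser() performs. Assumes mechanize is installed;
    # URLs and form field names are copied from the recipe above.
    import urllib
    import mechanize

    def times_login(username, password):
        br = mechanize.Browser()
        br.set_handle_robots(False)
        # Prime the session the same way the recipe does before authenticating.
        br.open('http://www.timesplus.co.uk/tto/news/?login=false'
                '&url=http://www.thetimes.co.uk/tto/news/?lightbox=false')
        data = urllib.urlencode({'userName': username,
                                 'password': password,
                                 'keepMeLoggedIn': 'false'})
        # POST the credentials to the Times+ authentication endpoint.
        response = br.open('https://www.timesplus.co.uk/iam/app/authenticate', data)
        return response.code == 200

    if __name__ == '__main__':
        # Hypothetical credentials, for illustration only.
        print times_login('user@example.com', 'secret')

To exercise the recipe itself end to end, calibre's ebook-convert can be pointed at the .recipe file with its --username and --password options, assuming a valid Times+ subscription; the "Empty Articles" half of the bug is covered by parse_index(), which scrapes the '?view=list' section pages instead of the retired timesonline.co.uk RSS feeds.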