This commit is contained in:
Fabian Graßl 2010-10-08 12:33:36 +02:00
commit e90af71b77
8 changed files with 156 additions and 103 deletions

View File

@@ -0,0 +1,30 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1286477122(BasicNewsRecipe):
    # Recipe for the Italian daily "Il Fatto Quotidiano".
    title = u'Il Fatto Quotidiano'
    oldest_article = 7
    max_articles_per_feed = 25
    language = 'it'
    __author__ = 'egilh'

    # Every section feed lives under the same /category/<slug>/feed/ path,
    # so build the (title, url) pairs from a table of section slugs.
    feeds = [
        (section, u'http://www.ilfattoquotidiano.it/category/' + slug + u'/feed/')
        for section, slug in [
            (u'Politica & Palazzo',    u'politica-palazzo'),
            (u'Giustizia & impunit\xe0', u'giustizia-impunita'),
            (u'Media & regime',        u'media-regime'),
            (u'Economia & Lobby',      u'economia-lobby'),
            (u'Lavoro & precari',      u'lavoro-precari'),
            (u'Ambiente & Veleni',     u'ambiente-veleni'),
            (u'Sport & miliardi',      u'sport-miliardi'),
            (u'Cronaca',               u'cronaca'),
            (u'Mondo',                 u'mondo'),
            (u'Societ\xe0',            u'societa'),
            (u'Scuola',                u'scuola'),
            (u'Tecno',                 u'tecno'),
            (u'Terza pagina',          u'terza-pagina'),
            (u'Piacere quotidiano',    u'piacere-quotidiano'),
            (u'Cervelli in fuga',      u'cervelli-in-fuga'),
            (u'Documentati!',          u'documentati'),
            (u'Misfatto',              u'misfatto'),
        ]
    ]

View File

@@ -1,103 +1,106 @@
#!/usr/bin/env python

__license__   = 'GPL v3'
__copyright__ = '2009-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.thetimes.co.uk
'''

import urllib
from calibre.web.feeds.news import BasicNewsRecipe


class TimesOnline(BasicNewsRecipe):
    # Recipe for The Times (UK). Requires a Times+ subscription
    # (needs_subscription is True; credentials are used in get_browser).
    title                 = 'The Times UK'
    __author__            = 'Darko Miletic'
    description           = 'news from United Kingdom and World'
    language              = 'en_GB'
    publisher             = 'Times Newspapers Ltd'
    category              = 'news, politics, UK'
    oldest_article        = 3
    max_articles_per_feed = 100
    no_stylesheets        = True
    use_embedded_content  = False
    encoding              = 'utf-8'
    delay                 = 1
    needs_subscription    = True
    publication_type      = 'newspaper'
    masthead_url          = 'http://www.thetimes.co.uk/tto/public/img/the_times_460.gif'
    INDEX                 = 'http://www.thetimes.co.uk'
    PREFIX                = u'http://www.thetimes.co.uk/tto/'
    extra_css             = """
                            .f-ha{font-size: xx-large; font-weight: bold}
                            .f-author{font-family: Arial,Helvetica,sans-serif}
                            .caption{font-size: small}
                            body{font-family: Georgia,"Times New Roman",Times,serif}
                            """

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    def get_browser(self):
        # Authenticate against Times+ before any article fetch; the
        # login endpoint is opened first to set up the session cookies.
        br = BasicNewsRecipe.get_browser()
        br.open('http://www.timesplus.co.uk/tto/news/?login=false&url=http://www.thetimes.co.uk/tto/news/?lightbox=false')
        if self.username is not None and self.password is not None:
            data = urllib.urlencode({ 'userName':self.username
                                     ,'password':self.password
                                     ,'keepMeLoggedIn':'false'
                                    })
            br.open('https://www.timesplus.co.uk/iam/app/authenticate', data)
        return br

    remove_tags = [
                    dict(name=['object','link','iframe','base','meta'])
                   ,dict(attrs={'class':'tto-counter'})
                  ]
    remove_attributes = ['lang']
    keep_only_tags = [
                       dict(attrs={'class':'heading' })
                      ,dict(attrs={'class':'f-author'})
                      ,dict(attrs={'id':'bodycopy'})
                     ]

    # Section listing pages (scraped by parse_index), not RSS feeds.
    feeds = [
              (u'UK News'     , PREFIX + u'news/uk/?view=list'      )
             ,(u'World'       , PREFIX + u'news/world/?view=list'   )
             ,(u'Politics'    , PREFIX + u'news/politics/?view=list')
             ,(u'Health'      , PREFIX + u'health/news/?view=list'  )
             ,(u'Education'   , PREFIX + u'education/?view=list'    )
             ,(u'Technology'  , PREFIX + u'technology/?view=list'   )
             ,(u'Science'     , PREFIX + u'science/?view=list'      )
             ,(u'Environment' , PREFIX + u'environment/?view=list'  )
             ,(u'Faith'       , PREFIX + u'faith/?view=list'        )
             ,(u'Opinion'     , PREFIX + u'opinion/?view=list'      )
             ,(u'Sport'       , PREFIX + u'sport/?view=list'        )
             ,(u'Business'    , PREFIX + u'business/?view=list'     )
             ,(u'Money'       , PREFIX + u'money/?view=list'        )
             ,(u'Life'        , PREFIX + u'life/?view=list'         )
             ,(u'Arts'        , PREFIX + u'arts/?view=list'         )
            ]

    def preprocess_html(self, soup):
        # Drop inline style attributes so extra_css controls presentation.
        for item in soup.findAll(style=True):
            del item['style']
        return self.adeify_images(soup)

    def parse_index(self):
        # The "feeds" above are HTML listing pages; scrape the article
        # links (anchors inside td.title cells) from each section page.
        totalfeeds = []
        lfeeds = self.get_feeds()
        for feedobj in lfeeds:
            feedtitle, feedurl = feedobj
            self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
            articles = []
            soup = self.index_to_soup(feedurl)
            for item in soup.findAll('td', attrs={'class':'title'}):
                atag = item.find('a')
                url = self.INDEX + atag['href']
                title = self.tag_to_string(atag)
                articles.append({
                                  'title'      :title
                                 ,'date'       :''
                                 ,'url'        :url
                                 ,'description':''
                                })
            totalfeeds.append((feedtitle, articles))
        return totalfeeds

View File

@@ -19,7 +19,6 @@ from calibre.ebooks.metadata.epub import set_metadata
from calibre.library.server.utils import strftime from calibre.library.server.utils import strftime
from calibre.utils.config import config_dir, prefs from calibre.utils.config import config_dir, prefs
from calibre.utils.date import isoformat, now, parse_date from calibre.utils.date import isoformat, now, parse_date
from calibre.utils.localization import get_lang
from calibre.utils.logging import Log from calibre.utils.logging import Log
from calibre.utils.zipfile import ZipFile from calibre.utils.zipfile import ZipFile

View File

@@ -101,7 +101,6 @@ STANDARD_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
CALIBRE_METADATA_FIELDS) CALIBRE_METADATA_FIELDS)
# Metadata fields that smart update must do special processing to copy. # Metadata fields that smart update must do special processing to copy.
SC_FIELDS_NOT_COPIED = frozenset(['title', 'title_sort', 'authors', SC_FIELDS_NOT_COPIED = frozenset(['title', 'title_sort', 'authors',
'author_sort', 'author_sort_map', 'author_sort', 'author_sort_map',
'cover_data', 'tags', 'language', 'cover_data', 'tags', 'language',
@@ -124,5 +123,6 @@ SERIALIZABLE_FIELDS = SOCIAL_METADATA_FIELDS.union(
PUBLICATION_METADATA_FIELDS).union( PUBLICATION_METADATA_FIELDS).union(
CALIBRE_METADATA_FIELDS).union( CALIBRE_METADATA_FIELDS).union(
DEVICE_METADATA_FIELDS) - \ DEVICE_METADATA_FIELDS) - \
frozenset(['device_collections', 'formats']) frozenset(['device_collections', 'formats',
'cover_data'])
# these are rebuilt when needed # these are rebuilt when needed

View File

@@ -147,26 +147,32 @@ class ChooseLibraryAction(InterfaceAction):
self.qs_locations = [i[1] for i in locations] self.qs_locations = [i[1] for i in locations]
self.rename_menu.clear() self.rename_menu.clear()
self.delete_menu.clear() self.delete_menu.clear()
quick_actions = [] quick_actions, rename_actions, delete_actions = [], [], []
for name, loc in locations: for name, loc in locations:
ac = self.quick_menu.addAction(name, Dispatcher(partial(self.switch_requested, ac = self.quick_menu.addAction(name, Dispatcher(partial(self.switch_requested,
loc))) loc)))
quick_actions.append(ac) quick_actions.append(ac)
self.rename_menu.addAction(name, Dispatcher(partial(self.rename_requested, ac = self.rename_menu.addAction(name, Dispatcher(partial(self.rename_requested,
name, loc))) name, loc)))
self.delete_menu.addAction(name, Dispatcher(partial(self.delete_requested, rename_actions.append(ac)
ac = self.delete_menu.addAction(name, Dispatcher(partial(self.delete_requested,
name, loc))) name, loc)))
delete_actions.append(ac)
qs_actions = []
for i, x in enumerate(locations[:len(self.switch_actions)]): for i, x in enumerate(locations[:len(self.switch_actions)]):
name, loc = x name, loc = x
ac = self.switch_actions[i] ac = self.switch_actions[i]
ac.setText(name) ac.setText(name)
ac.setVisible(True) ac.setVisible(True)
qs_actions.append(ac)
self.quick_menu_action.setVisible(bool(locations)) self.quick_menu_action.setVisible(bool(locations))
self.rename_menu_action.setVisible(bool(locations)) self.rename_menu_action.setVisible(bool(locations))
self.delete_menu_action.setVisible(bool(locations)) self.delete_menu_action.setVisible(bool(locations))
self.gui.location_manager.set_switch_actions(quick_actions) self.gui.location_manager.set_switch_actions(quick_actions,
rename_actions, delete_actions, qs_actions,
self.action_choose)
def location_selected(self, loc): def location_selected(self, loc):

View File

@@ -24,7 +24,6 @@ class LocationManager(QObject): # {{{
locations_changed = pyqtSignal() locations_changed = pyqtSignal()
unmount_device = pyqtSignal() unmount_device = pyqtSignal()
location_selected = pyqtSignal(object) location_selected = pyqtSignal(object)
switch_actions_set = pyqtSignal(object)
def __init__(self, parent=None): def __init__(self, parent=None):
QObject.__init__(self, parent) QObject.__init__(self, parent)
@@ -70,12 +69,23 @@ class LocationManager(QObject): # {{{
ac('cardb', _('Card B'), 'sd.png', ac('cardb', _('Card B'), 'sd.png',
_('Show books in storage card B')) _('Show books in storage card B'))
def set_switch_actions(self, actions): def set_switch_actions(self, quick_actions, rename_actions, delete_actions,
switch_actions, choose_action):
self.switch_menu = QMenu() self.switch_menu = QMenu()
for ac in actions: self.switch_menu.addAction(choose_action)
self.cs_menus = []
for t, acs in [(_('Quick switch'), quick_actions),
(_('Rename library'), rename_actions),
(_('Delete library'), delete_actions)]:
if acs:
self.cs_menus.append(QMenu(t))
for ac in acs:
self.cs_menus[-1].addAction(ac)
self.switch_menu.addMenu(self.cs_menus[-1])
self.switch_menu.addSeparator()
for ac in switch_actions:
self.switch_menu.addAction(ac) self.switch_menu.addAction(ac)
self.library_action.setMenu(self.switch_menu) self.library_action.setMenu(self.switch_menu)
self.switch_actions_set.emit(bool(actions))
def _location_selected(self, location, *args): def _location_selected(self, location, *args):
if location != self.current_location and hasattr(self, if location != self.current_location and hasattr(self,

View File

@@ -144,10 +144,10 @@ class DownloadMetadata(Thread):
def commit_covers(self, all=False): def commit_covers(self, all=False):
if all: if all:
self.worker.jobs.put(False) self.worker.jobs.put((False, False))
while True: while True:
try: try:
id, fmi, ok, cdata = self.worker.results.get(False) id, fmi, ok, cdata = self.worker.results.get_nowait()
if ok: if ok:
self.fetched_covers[id] = cdata self.fetched_covers[id] = cdata
self.results.put((id, 'cover', ok, fmi.title)) self.results.put((id, 'cover', ok, fmi.title))
@@ -210,6 +210,12 @@ class DoDownload(QObject):
pass pass
if not self.downloader.is_alive(): if not self.downloader.is_alive():
self.timer.stop() self.timer.stop()
while True:
try:
r = self.downloader.results.get_nowait()
self.handle_result(r)
except Empty:
break
self.pd.accept() self.pd.accept()
def handle_result(self, r): def handle_result(self, r):

View File

@@ -1010,7 +1010,6 @@ def command_restore_database(args, dbpath):
'saved to', name) 'saved to', name)
def list_categories_option_parser(): def list_categories_option_parser():
from calibre.library.check_library import CHECKS
parser = get_parser(_('''\ parser = get_parser(_('''\
%prog list_categories [options] %prog list_categories [options]