Merge from trunk

This commit is contained in:
Charles Haley 2011-10-15 12:02:26 +02:00
commit 1de538588a
12 changed files with 81 additions and 73 deletions

View File

@ -110,8 +110,10 @@ class BrandEins(BasicNewsRecipe):
selected_issue = issue_map[selected_issue_key]
url = selected_issue.get('href', False)
# Get the title for the magazine - build it out of the title of the cover - take the issue and year;
self.title = "brand eins " + selected_issue_key[4:] + "/" + selected_issue_key[0:4]
# self.title = "brand eins " + selected_issue_key[4:] + "/" + selected_issue_key[0:4]
# Get the alternative title for the magazine - build it out of the title of the cover - without the issue and year;
url = 'http://brandeins.de/'+url
self.timefmt = ' ' + selected_issue_key[4:] + '/' + selected_issue_key[:4]
# url = "http://www.brandeins.de/archiv/magazin/tierisch.html"
titles_and_articles = self.brand_eins_parse_issue(url)
@ -163,4 +165,3 @@ class BrandEins(BasicNewsRecipe):
current_articles.append({'title': title, 'url': url, 'description': description, 'date':''})
titles_and_articles.append([chapter_title, current_articles])
return titles_and_articles

View File

@ -8,11 +8,7 @@ class DallasNews(BasicNewsRecipe):
no_stylesheets = True
use_embedded_content = False
remove_tags_before = dict(name='h1')
keep_only_tags = {'class':lambda x: x and 'article' in x}
remove_tags = [
{'class':['DMNSocialTools', 'article ', 'article first ', 'article premium']},
]
auto_cleanup = True
feeds = [
('Local News',

View File

@ -16,7 +16,7 @@ class FTDe(BasicNewsRecipe):
use_embedded_content = False
timefmt = ' [%d %b %Y]'
language = 'de'
max_articles_per_feed = 40
max_articles_per_feed = 30
no_stylesheets = True
remove_tags = [dict(id='navi_top'),
@ -84,17 +84,17 @@ class FTDe(BasicNewsRecipe):
dict(name='div', attrs={'class':'artikelsplitfaq'})]
#remove_tags_after = [dict(name='a', attrs={'class':'more'})]
feeds = [ ('Finanzen', 'http://www.ftd.de/rss2/finanzen/maerkte'),
('Meinungshungrige', 'http://www.ftd.de/rss2/meinungshungrige'),
feeds = [
('Unternehmen', 'http://www.ftd.de/rss2/unternehmen'),
('Finanzen', 'http://www.ftd.de/rss2/finanzen/maerkte'),
('Meinungen', 'http://www.ftd.de/rss2/meinungshungrige'),
('Politik', 'http://www.ftd.de/rss2/politik'),
('Karriere_Management', 'http://www.ftd.de/rss2/karriere-management'),
('IT_Medien', 'http://www.ftd.de/rss2/it-medien'),
('Management & Karriere', 'http://www.ftd.de/rss2/karriere-management'),
('IT & Medien', 'http://www.ftd.de/rss2/it-medien'),
('Wissen', 'http://www.ftd.de/rss2/wissen'),
('Sport', 'http://www.ftd.de/rss2/sport'),
('Auto', 'http://www.ftd.de/rss2/auto'),
('Lifestyle', 'http://www.ftd.de/rss2/lifestyle')
]

View File

@ -11,21 +11,12 @@ class AdvancedUserRecipe1298137661(BasicNewsRecipe):
conversion_options = {
'linearize_tables' : True
}
remove_tags = [
dict(name='a', attrs={'id':'articleCommentUrl'}),
dict(name='p', attrs={'class':'newsSummary'}),
dict(name='div', attrs={'class':'headerTools'})
]
keep_only_tags = [dict(name='div', attrs={'id':'main-content'}),
dict(name='div', attrs={'class':'contentNewsArticle'})]
feeds = [(u'Uutiset - HS.fi', u'http://www.hs.fi/uutiset/rss/'), (u'Politiikka - HS.fi', u'http://www.hs.fi/politiikka/rss/'),
feeds = [(u'Uutiset - HS.fi', u'http://www.hs.fi/uutiset/rss/')
, (u'Politiikka - HS.fi', u'http://www.hs.fi/politiikka/rss/'),
(u'Ulkomaat - HS.fi', u'http://www.hs.fi/ulkomaat/rss/'), (u'Kulttuuri - HS.fi', u'http://www.hs.fi/kulttuuri/rss/'),
(u'Kirjat - HS.fi', u'http://www.hs.fi/kulttuuri/kirjat/rss/'), (u'Elokuvat - HS.fi', u'http://www.hs.fi/kulttuuri/elokuvat/rss/')
]
def print_version(self, url):
    """Map an article URL to its printer-friendly counterpart.

    Keeps only the final path segment of *url* (including the leading
    slash), drops a trailing ``?ref=rss`` tracking marker if present,
    and prepends the hs.fi print host.
    """
    tail = url[url.rfind("/"):]
    marker = tail.rfind("?ref=rss")
    if marker > 0:
        tail = tail[:marker]
    return "http://www.hs.fi/tulosta" + tail

Binary file not shown.

After

Width:  |  Height:  |  Size: 868 B

View File

@ -1,34 +1,38 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__author__ = 'Lorenzo Vigentini, based on Darko Miletic, Gabriele Marini'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>, Lorenzo Vigentini <l.vigentini at gmail.com>'
__copyright__ = '2009-2011, Darko Miletic <darko.miletic at gmail.com>, Lorenzo Vigentini <l.vigentini at gmail.com>'
description = 'Italian daily newspaper - v1.01 (04, January 2010); 16.05.2010 new version'
'''
http://www.repubblica.it/
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class LaRepubblica(BasicNewsRecipe):
__author__ = 'Lorenzo Vigentini, Gabriele Marini'
description = 'Italian daily newspaper'
cover_url = 'http://www.repubblica.it/images/homepage/la_repubblica_logo.gif'
title = u'La Repubblica'
title = 'La Repubblica'
__author__ = 'Lorenzo Vigentini, Gabriele Marini, Darko Miletic'
description = 'il quotidiano online con tutte le notizie in tempo reale. News e ultime notizie. Tutti i settori: politica, cronaca, economia, sport, esteri, scienza, tecnologia, internet, spettacoli, musica, cultura, arte, mostre, libri, dvd, vhs, concerti, cinema, attori, attrici, recensioni, chat, cucina, mappe. Le citta di Repubblica: Roma, Milano, Bologna, Firenze, Palermo, Napoli, Bari, Torino.'
masthead_url = 'http://www.repubblica.it/static/images/homepage/2010/la-repubblica-logo-home-payoff.png'
publisher = 'Gruppo editoriale L\'Espresso'
category = 'News, politics, culture, economy, general interest'
language = 'it'
timefmt = '[%a, %d %b, %Y]'
oldest_article = 5
max_articles_per_feed = 100
encoding = 'utf8'
use_embedded_content = False
recursion = 10
remove_javascript = True
#recursion = 10
no_stylesheets = True
extra_css = """
img{display: block}
"""
preprocess_regexps = [
(re.compile(r'.*?<head>', re.DOTALL|re.IGNORECASE), lambda match: '<head>'),
(re.compile(r'<head>.*?<title>', re.DOTALL|re.IGNORECASE), lambda match: '<head><title>'),
(re.compile(r'</title>.*?</head>', re.DOTALL|re.IGNORECASE), lambda match: '</title></head>')
]
def get_article_url(self, article):
link = article.get('id', article.get('guid', None))
@ -36,16 +40,16 @@ class LaRepubblica(BasicNewsRecipe):
return article
return link
keep_only_tags = [dict(name='div', attrs={'class':'articolo'}),
dict(name='div', attrs={'class':'body-text'}),
# dict(name='div', attrs={'class':'page-content'}),
keep_only_tags = [
dict(attrs={'class':'articolo'}),
dict(attrs={'class':'body-text'}),
dict(name='p', attrs={'class':'disclaimer clearfix'}),
dict(name='div', attrs={'id':'contA'})
dict(attrs={'id':'contA'})
]
remove_tags = [
dict(name=['object','link']),
dict(name=['object','link','meta']),
dict(name='span',attrs={'class':'linkindice'}),
dict(name='div', attrs={'class':'bottom-mobile'}),
dict(name='div', attrs={'id':['rssdiv','blocco']}),

View File

@ -10,7 +10,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Sueddeutsche(BasicNewsRecipe):
title = u'Süddeutsche'
title = u'sueddeutsche.de'
description = 'News from Germany'
__author__ = 'Oliver Niesner and Armin Geller'
use_embedded_content = False
@ -62,7 +62,7 @@ class Sueddeutsche(BasicNewsRecipe):
(u'Sport', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5ESport%24?output=rss'),
(u'Leben', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5ELeben%24?output=rss'),
(u'Karriere', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EKarriere%24?output=rss'),
(u'München&Region', u'http://www.sueddeutsche.de/app/service/rss/ressort/muenchen/rss.xml'),
(u'M&uuml;nchen & Region', u'http://www.sueddeutsche.de/app/service/rss/ressort/muenchen/rss.xml'),
(u'Bayern', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EBayern%24?output=rss'),
(u'Medien', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EMedien%24?output=rss'),
(u'Digital', u'http://suche.sueddeutsche.de/query/%23/sort/-docdatetime/drilldown/%C2%A7ressort%3A%5EDigital%24?output=rss'),

View File

@ -3,7 +3,7 @@
from calibre.web.feeds.news import BasicNewsRecipe
class TelepolisNews(BasicNewsRecipe):
title = u'Telepolis (News+Artikel)'
title = u'Telepolis'
__author__ = 'syntaxis'
publisher = 'Heise Zeitschriften Verlag GmbH & Co KG'
description = 'News from Telepolis'
@ -15,11 +15,8 @@ class TelepolisNews(BasicNewsRecipe):
encoding = "utf-8"
language = 'de'
remove_empty_feeds = True
keep_only_tags = [dict(name = 'div',attrs={'class':'head'}),dict(name = 'div',attrs={'class':'leftbox'}),dict(name='td',attrs={'class':'strict'})]
remove_tags = [ dict(name='td',attrs={'class':'blogbottom'}),
dict(name='div',attrs={'class':'forum'}), dict(name='div',attrs={'class':'social'}),dict(name='div',attrs={'class':'blog-letter p-news'}),
@ -28,7 +25,6 @@ class TelepolisNews(BasicNewsRecipe):
remove_tags_after = [dict(name='span', attrs={'class':['breadcrumb']})]
feeds = [(u'News', u'http://www.heise.de/tp/news-atom.xml')]
html2lrf_options = [
@ -39,7 +35,6 @@ class TelepolisNews(BasicNewsRecipe):
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'
def preprocess_html(self, soup):
mtag = '<meta http-equiv="Content-Type" content="text/html; charset=' + self.encoding + '">'
soup.head.insert(0,mtag)

View File

@ -84,7 +84,7 @@ class PDNOVEL(USBMS):
FORMATS = ['epub', 'pdf']
VENDOR_ID = [0x18d1]
PRODUCT_ID = [0xb004]
PRODUCT_ID = [0xb004, 0xa004]
BCD = [0x224]
VENDOR_NAME = 'ANDROID'

View File

@ -204,6 +204,7 @@ def render_data(mi, use_roman_numbers=True, all_fields=False):
class CoverView(QWidget): # {{{
cover_changed = pyqtSignal(object, object)
cover_removed = pyqtSignal(object)
def __init__(self, vertical, parent=None):
QWidget.__init__(self, parent)
@ -289,10 +290,12 @@ class CoverView(QWidget): # {{{
cm = QMenu(self)
paste = cm.addAction(_('Paste Cover'))
copy = cm.addAction(_('Copy Cover'))
remove = cm.addAction(_('Remove Cover'))
if not QApplication.instance().clipboard().mimeData().hasImage():
paste.setEnabled(False)
copy.triggered.connect(self.copy_to_clipboard)
paste.triggered.connect(self.paste_from_clipboard)
remove.triggered.connect(self.remove_cover)
cm.exec_(ev.globalPos())
def copy_to_clipboard(self):
@ -315,6 +318,13 @@ class CoverView(QWidget): # {{{
self.cover_changed.emit(id_,
pixmap_to_data(pmap))
def remove_cover(self):
    """Reset the displayed cover to the default image and notify listeners.

    Repaints immediately, then emits ``cover_removed`` with the book id
    (if one is known) so the database layer can drop the stored cover.
    """
    book_id = self.data.get('id', None)
    self.pixmap = self.default_pixmap
    self.do_layout()
    self.update()
    if book_id is not None:
        self.cover_removed.emit(book_id)
# }}}
@ -457,6 +467,7 @@ class BookDetails(QWidget): # {{{
remote_file_dropped = pyqtSignal(object, object)
files_dropped = pyqtSignal(object, object)
cover_changed = pyqtSignal(object, object)
cover_removed = pyqtSignal(object)
# Drag 'n drop {{{
DROPABBLE_EXTENSIONS = IMAGE_EXTENSIONS+BOOK_EXTENSIONS
@ -514,6 +525,7 @@ class BookDetails(QWidget): # {{{
self.cover_view = CoverView(vertical, self)
self.cover_view.cover_changed.connect(self.cover_changed.emit)
self.cover_view.cover_removed.connect(self.cover_removed.emit)
self._layout.addWidget(self.cover_view)
self.book_info = BookInfo(vertical, self)
self._layout.addWidget(self.book_info)

View File

@ -261,6 +261,8 @@ class LayoutMixin(object): # {{{
self.book_details.files_dropped.connect(self.iactions['Add Books'].files_dropped_on_book)
self.book_details.cover_changed.connect(self.bd_cover_changed,
type=Qt.QueuedConnection)
self.book_details.cover_removed.connect(self.bd_cover_removed,
type=Qt.QueuedConnection)
self.book_details.remote_file_dropped.connect(
self.iactions['Add Books'].remote_file_dropped_on_book,
type=Qt.QueuedConnection)
@ -279,6 +281,12 @@ class LayoutMixin(object): # {{{
if self.cover_flow:
self.cover_flow.dataChanged()
def bd_cover_removed(self, id_):
    """Handle the book-details 'cover removed' signal for book *id_*.

    Deletes the cover from the library database (committing, without a
    change notification) and refreshes the cover browser if it is open.
    """
    db = self.library_view.model().db
    db.remove_cover(id_, commit=True, notify=False)
    if self.cover_flow:
        self.cover_flow.dataChanged()
def save_layout_state(self):
for x in ('library', 'memory', 'card_a', 'card_b'):
getattr(self, x+'_view').save_state()

View File

@ -500,6 +500,7 @@ class JobsDialog(QDialog, Ui_JobsDialog):
def kill_job(self, *args):
rows = [index.row() for index in
self.jobs_view.selectionModel().selectedRows()]
if not rows:
return error_dialog(self, _('No job'),
_('No job selected'), show=True)
if question_dialog(self, _('Are you sure?'),