Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit fe7abd71f0: Merge with the latest trunk
@@ -19,6 +19,80 @@
# new recipes:
# - title:

- version: 0.8.52
  date: 2012-05-18

  new features:
    - title: "EPUB Input: When setting the cover for a book that identifies its cover image, but not the html wrapper around the cover, try to detect and remove that wrapper automatically."
      tickets: [999959]

    - title: "When deleting books of a specific format, show the number of books with each format available"

    - title: "Linux install: No longer create MAN pages, as all utilities have more comprehensive command line --help anyway"

    - title: "Add a tweak in Preferences->Tweaks to control the default choice of format for the Tweak Book feature"

    - title: "Conversion: Allow setting negative page margins. A negative page margin means that calibre will not specify any page margin in the output document (for formats that support this)"

  bug fixes:
    - title: "Tweak Book: Fix handling of covers when tweaking KF8 books"

    - title: "KF8 Output: Handle input documents with out-of-sequence ToC entries. Note that currently section jumping in the KF8 output produced by calibre for such files does not work."
      tickets: [1000493]

    - title: "Edit metadata dialog: Fix the edit values button for custom tag-like columns showing an unneeded warning about changed values"

    - title: "EPUB Output: Be a little more conservative when removing <form> tags. Only remove them if they have actual forms inside."
      tickets: [1000384]

    - title: "EPUB Input: Correctly update the Cover entry in the ToC even when the entry has a fragment reference."
      tickets: [999973]

    - title: "Update ImageMagick DLLs in all calibre binary builds to fix security vulnerabilities in ImageMagick"
      tickets: [999496]

    - title: "Advanced search dialog: Fix equals and regex matching not being applied for custom column searches."
      tickets: [980221]

    - title: "RTF Input: Handle old RTF files that have commands without braces."
      tickets: [994133]

    - title: "Get Books: Diesel, fix results not showing when only a single match is found"

    - title: "Get Books: Fix DRM status indicators for the Kobo and Diesel stores. Fix Smashwords not returning results."
      tickets: [993755]

    - title: "Fix regression in 0.8.51 that broke viewing of LIT and some EPUB files"
      tickets: [998248, 998216]

  improved recipes:
    - Clarin
    - Spiegel
    - Spiegel International
    - Montreal Gazette
    - Gosc Niedzielny
    - Ars Technica

  new recipes:
    - title: "Army/Navy/Air Force/Marine Times and News Busters"
      author: jde

    - title: "Ads of the World, Heavy Meta (Italian) and Juve La Stampa"
      author: faber1971

    - title: "Revista Summa"
      author: Vakya

    - title: "Strategic Culture"
      author: Darko Miletic

    - title: Stars and Stripes
      author: adoucette

    - title: Nachdenkseiten
      author: jrda


- version: 0.8.51
  date: 2012-05-11
recipes/air_force_times.recipe (new file, 43 lines)
@@ -0,0 +1,43 @@
from calibre.web.feeds.news import BasicNewsRecipe


class AirForceTimes(BasicNewsRecipe):
    title = 'Air Force Times'
    __author__ = 'jde'
    __date__ = '16 May 2012'
    __version__ = '1.0'
    description = 'News of the U.S. Air Force'
    language = 'en'
    publisher = 'AirForceTimes.com'
    category = 'news, U.S. Air Force'
    tags = 'news, U.S. Air Force'
    cover_url = 'http://www.airforcetimes.com/images/logo_airforcetimes_alert.jpg'
    masthead_url = 'http://www.airforcetimes.com/images/logo_airforcetimes_alert.jpg'
    oldest_article = 7  # days
    max_articles_per_feed = 25
    publication_type = 'newspaper'
    no_stylesheets = True
    use_embedded_content = False
    encoding = None
    recursions = 0
    needs_subscription = False
    remove_javascript = True
    remove_empty_feeds = True
    auto_cleanup = True

    feeds = [
        ('News', 'http://www.airforcetimes.com/rss_news.php'),
        ('Benefits', 'http://www.airforcetimes.com/rss_benefits.php'),
        ('Money', 'http://www.airforcetimes.com/rss_money.php'),
        ('Careers & Education', 'http://www.airforcetimes.com/rss_careers.php'),
        ('Community', 'http://www.airforcetimes.com/rss_community.php'),
        ('Off Duty', 'http://www.airforcetimes.com/rss_off_duty.php'),
        ('Entertainment', 'http://www.airforcetimes.com/rss_entertainment.php'),
        ('Guard & Reserve', 'http://www.airforcetimes.com/rss_guard.php'),
    ]
recipes/army_times.recipe (new file, 42 lines)
@@ -0,0 +1,42 @@
from calibre.web.feeds.news import BasicNewsRecipe


class ArmyTimes(BasicNewsRecipe):
    title = 'Army Times'
    __author__ = 'jde'
    __date__ = '16 May 2012'
    __version__ = '1.0'
    description = 'News of the U.S. Army'
    language = 'en'
    publisher = 'ArmyTimes.com'
    category = 'news, U.S. Army'
    tags = 'news, U.S. Army'
    cover_url = 'http://www.armytimes.com/images/logo_armytimes_alert.jpg'
    masthead_url = 'http://www.armytimes.com/images/logo_armytimes_alert.jpg'
    oldest_article = 7  # days
    max_articles_per_feed = 25
    publication_type = 'newspaper'
    no_stylesheets = True
    use_embedded_content = False
    encoding = None
    recursions = 0
    needs_subscription = False
    remove_javascript = True
    remove_empty_feeds = True
    auto_cleanup = True

    feeds = [
        ('News', 'http://www.armytimes.com/rss_news.php'),
        ('Benefits', 'http://www.armytimes.com/rss_benefits.php'),
        ('Money', 'http://www.armytimes.com/rss_money.php'),
        ('Careers & Education', 'http://www.armytimes.com/rss_careers.php'),
        ('Community', 'http://www.armytimes.com/rss_community.php'),
        ('Off Duty', 'http://www.armytimes.com/rss_off_duty.php'),
        ('Entertainment', 'http://www.armytimes.com/rss_entertainment.php'),
        ('Guard & Reserve', 'http://www.armytimes.com/rss_guard.php'),
    ]
@@ -4,9 +4,8 @@ __copyright__ = '2008-2012, Darko Miletic <darko.miletic at gmail.com>'
 arstechnica.com
 '''

 import re
 from calibre.web.feeds.news import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag
+from calibre.ebooks.BeautifulSoup import BeautifulSoup

 class ArsTechnica(BasicNewsRecipe):
     title = u'Ars Technica'
@@ -1,6 +1,6 @@
+from __future__ import unicode_literals
 __license__ = 'GPL v3'
-__copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2008-2012, Darko Miletic <darko.miletic at gmail.com>'
 '''
 clarin.com
 '''
@@ -8,9 +8,9 @@ clarin.com
 from calibre.web.feeds.news import BasicNewsRecipe

 class Clarin(BasicNewsRecipe):
-    title = 'Clarin'
+    title = 'Clarín'
     __author__ = 'Darko Miletic'
-    description = 'Noticias de Argentina y mundo'
+    description = 'Clarin.com. Noticias de la Argentina y el mundo. Información actualizada las 24 horas y en español. Informate ya'
     publisher = 'Grupo Clarin'
     category = 'news, politics, Argentina'
     oldest_article = 2
@@ -26,9 +26,7 @@ class Clarin(BasicNewsRecipe):
     extra_css = """
     body{font-family: Arial,Helvetica,sans-serif}
     h2{font-family: Georgia,serif; font-size: xx-large}
-    .hora{font-weight:bold}
-    .hd p{font-size: small}
-    .nombre-autor{color: #0F325A}
+    .info,.nombre-autor,.hora{font-size: small}
     """

     conversion_options = {
@@ -38,38 +36,35 @@ class Clarin(BasicNewsRecipe):
         , 'language' : language
         }

-    keep_only_tags = [dict(attrs={'class':['hd','mt']})]
-    remove_tags = [dict(name=['meta','base','link'])]
-    remove_attributes = ['lang','_mce_bogus']
+    keep_only_tags = [dict(attrs={'class':['hd','mt','bd']})]
+    remove_tags = [dict(name=['meta','base','link','iframe','embed','object'])]
+    remove_attributes = ['lang']

     feeds = [
               (u'Pagina principal', u'http://www.clarin.com/rss/'             )
              ,(u'Politica'        , u'http://www.clarin.com/rss/politica/'    )
              ,(u'Deportes'        , u'http://www.clarin.com/rss/deportes/'    )
              ,(u'Economia'        , u'http://www.clarin.com/economia/'        )
              ,(u'Mundo'           , u'http://www.clarin.com/rss/mundo/'       )
              ,(u'iEco'            , u'http://www.ieco.clarin.com/rss/'        )
              ,(u'Espectaculos'    , u'http://www.clarin.com/rss/espectaculos/')
              ,(u'Sociedad'        , u'http://www.clarin.com/rss/sociedad/'    )
              ,(u'Ciudades'        , u'http://www.clarin.com/rss/ciudades/'    )
              ,(u'Policiales'      , u'http://www.clarin.com/rss/policiales/'  )
-             ,(u'Internet'        , u'http://www.clarin.com/rss/internet/'    )
-             ,(u'Ciudades'        , u'http://www.clarin.com/rss/ciudades/'    )
             ]

-    def get_article_url(self, article):
-        return article.get('guid', None)

     def print_version(self, url):
         return url + '?print=1'

+    def get_article_url(self, article):
+        return article.get('guid', None)

+    def get_cover_url(self):
+        cover_url = None
+        soup = self.index_to_soup(self.INDEX)
+        cover_item = soup.find('div', attrs={'class':'bb-md bb-md-edicion_papel'})
+        if cover_item:
+            ap = cover_item.find('a', attrs={'href':'/edicion-impresa/'})
+            if ap:
+                cover_url = self.INDEX + ap.img['src']
+        for item in soup.findAll('a', href=True):
+            if item['href'].startswith('/tapas/TAPA_CLA'):
+                cover_url = self.INDEX + item['href']
+                return cover_url
+        return cover_url
recipes/economico.recipe (new file, 30 lines)
@@ -0,0 +1,30 @@
from calibre.web.feeds.news import BasicNewsRecipe


class Economico(BasicNewsRecipe):
    title = u'Economico'
    language = 'pt'
    __author__ = 'Krittika Goyal'
    oldest_article = 1  # days
    max_articles_per_feed = 25
    encoding = 'utf-8'
    use_embedded_content = False

    no_stylesheets = True
    auto_cleanup = True

    feeds = [
        ('Ultima Hora', 'http://economico.sapo.pt/rss/ultimas'),
        ('Em Foco', 'http://economico.sapo.pt/rss/emfoco'),
        ('Mercados', 'http://economico.sapo.pt/rss/mercados'),
        ('Empresas', 'http://economico.sapo.pt/rss/empresas'),
        ('Economia', 'http://economico.sapo.pt/rss/economia'),
        ('Politica', 'http://economico.sapo.pt/rss/politica'),
    ]
recipes/marine_corps_times.recipe (new file, 42 lines)
@@ -0,0 +1,42 @@
from calibre.web.feeds.news import BasicNewsRecipe


class MarineCorpsTimes(BasicNewsRecipe):
    title = 'Marine Corps Times'
    __author__ = 'jde'
    __date__ = '16 May 2012'
    __version__ = '1.0'
    description = 'News of the U.S. Marine Corps'
    language = 'en'
    publisher = 'MarineCorpsTimes.com'
    category = 'news, U.S. Marine Corps'
    tags = 'news, U.S. Marine Corps'
    cover_url = 'http://www.marinecorpstimes.com/images/logo_marinetimes-alert.jpg'
    masthead_url = 'http://www.marinecorpstimes.com/images/logo_marinetimes-alert.jpg'
    oldest_article = 7  # days
    max_articles_per_feed = 25
    publication_type = 'newspaper'
    no_stylesheets = True
    use_embedded_content = False
    encoding = None
    recursions = 0
    needs_subscription = False
    remove_javascript = True
    remove_empty_feeds = True
    auto_cleanup = True

    feeds = [
        ('News', 'http://www.MarineCorpstimes.com/rss_news.php'),
        ('Benefits', 'http://www.MarineCorpstimes.com/rss_benefits.php'),
        ('Money', 'http://www.MarineCorpstimes.com/rss_money.php'),
        ('Careers & Education', 'http://www.MarineCorpstimes.com/rss_careers.php'),
        ('Community', 'http://www.MarineCorpstimes.com/rss_community.php'),
        ('Off Duty', 'http://www.MarineCorpstimes.com/rss_off_duty.php'),
        ('Entertainment', 'http://www.MarineCorpstimes.com/rss_entertainment.php'),
        ('Guard & Reserve', 'http://www.MarineCorpstimes.com/rss_guard.php'),
    ]
recipes/military_times.recipe (new file, 41 lines)
@@ -0,0 +1,41 @@
from calibre.web.feeds.news import BasicNewsRecipe


class MilitaryTimes(BasicNewsRecipe):
    title = 'Military Times'
    __author__ = 'jde'
    __date__ = '16 May 2012'
    __version__ = '1.0'
    description = 'News of the U.S. Military'
    language = 'en'
    publisher = 'MilitaryTimes.com'
    category = 'news, U.S. Military'
    tags = 'news, U.S. Military'
    cover_url = 'http://www.militarytimes.com/images/logo_militarytimes_landing-s.gif'
    masthead_url = 'http://www.militarytimes.com/images/logo_militarytimes_landing-s.gif'
    oldest_article = 7  # days
    max_articles_per_feed = 25
    publication_type = 'newspaper'
    no_stylesheets = True
    use_embedded_content = False
    encoding = None
    recursions = 0
    needs_subscription = False
    remove_javascript = True
    remove_empty_feeds = True
    auto_cleanup = True

    feeds = [
        ('News', 'http://www.militarytimes.com/rss_news.php'),
        ('Benefits', 'http://www.militarytimes.com/rss_benefits.php'),
        ('Money', 'http://www.militarytimes.com/rss_money.php'),
        ('Careers & Education', 'http://www.militarytimes.com/rss_careers.php'),
        ('Community', 'http://www.militarytimes.com/rss_community.php'),
        ('Off Duty', 'http://www.militarytimes.com/rss_off_duty.php'),
        ('Entertainment', 'http://www.militarytimes.com/rss_entertainment.php'),
        ('Guard & Reserve', 'http://www.militarytimes.com/rss_guard.php'),
    ]
recipes/navy_times.recipe (new file, 42 lines)
@@ -0,0 +1,42 @@
from calibre.web.feeds.news import BasicNewsRecipe


class NavyTimes(BasicNewsRecipe):
    title = 'Navy Times'
    __author__ = 'jde'
    __date__ = '16 May 2012'
    __version__ = '1.0'
    description = 'News of the U.S. Navy'
    language = 'en'
    publisher = 'NavyTimes.com'
    category = 'news, U.S. Navy'
    tags = 'news, U.S. Navy'
    cover_url = 'http://www.navytimes.com/images/logo_navytimes_alert.jpg'
    masthead_url = 'http://www.navytimes.com/images/logo_navytimes_alert.jpg'
    oldest_article = 7  # days
    max_articles_per_feed = 25
    publication_type = 'newspaper'
    no_stylesheets = True
    use_embedded_content = False
    encoding = None
    recursions = 0
    needs_subscription = False
    remove_javascript = True
    remove_empty_feeds = True
    auto_cleanup = True

    feeds = [
        ('News', 'http://www.navytimes.com/rss_news.php'),
        ('Benefits', 'http://www.navytimes.com/rss_benefits.php'),
        ('Money', 'http://www.navytimes.com/rss_money.php'),
        ('Careers & Education', 'http://www.navytimes.com/rss_careers.php'),
        ('Community', 'http://www.navytimes.com/rss_community.php'),
        ('Off Duty', 'http://www.navytimes.com/rss_off_duty.php'),
        ('Entertainment', 'http://www.navytimes.com/rss_entertainment.php'),
        ('Guard & Reserve', 'http://www.navytimes.com/rss_guard.php'),
    ]
recipes/news_busters.recipe (new file, 20 lines)
@@ -0,0 +1,20 @@
from calibre.web.feeds.news import BasicNewsRecipe


class NewsBusters(BasicNewsRecipe):
    title = u'News Busters'
    description = 'Exposing and Combating Liberal Media Bias'
    __author__ = 'jde'
    oldest_article = 1  # day
    max_articles_per_feed = 100
    cover_url = "http://newsbusters.org/sites/all/themes/genesis_nb/images/nb-mrc.png"
    language = 'en'
    encoding = 'utf8'
    needs_subscription = False
    remove_javascript = True
    recursions = 0
    use_embedded_content = False
    no_stylesheets = True
    auto_cleanup = True

    feeds = [(u'Blog', u'http://www.newsbusters.org/rss.xml')]
@@ -1,5 +1,5 @@
 """
-Pocket Calibre Recipe v1.1
+Pocket Calibre Recipe v1.2
 """
 __license__ = 'GPL v3'
 __copyright__ = '''
@@ -97,7 +97,12 @@ class Pocket(BasicNewsRecipe):
                 self.readList.append(readLink)
             totalfeeds.append((feedtitle, articles))
         if len(self.readList) < self.minimum_articles:
-            raise Exception("Not enough articles in Pocket! Change minimum_articles or add more articles.")
+            self.mark_as_read_after_dl = False
+            if hasattr(self, 'abort_recipe_processing'):
+                self.abort_recipe_processing("Only %d articles retrieved, minimum_articles not reached" % len(self.readList))
+            else:
+                self.log.exception("Only %d articles retrieved, minimum_articles not reached" % len(self.readList))
+                return []
         return totalfeeds

     def mark_as_read(self, markList):
@@ -1,3 +1,4 @@
+
 __license__ = 'GPL v3'
 __copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
 '''
@@ -15,6 +16,8 @@ class Spiegel_int(BasicNewsRecipe):
     language = 'en_DE'
     no_stylesheets = True
     use_embedded_content = False
+    auto_cleanup = True
+    auto_cleanup_keep = '//*[@id="spArticleTopAsset"]'
     encoding = 'cp1252'
     publisher = 'SPIEGEL ONLINE GmbH'
     category = 'news, politics, Germany'
@@ -43,25 +46,25 @@ class Spiegel_int(BasicNewsRecipe):
     .spPhotoGallery{font-size:x-small; color:#990000 ;}
     '''

-    keep_only_tags = [dict(attrs={'id':'spArticleContent'})]
-    remove_tags_after = dict(attrs={'id':'spArticleBody'})
-    remove_tags = [dict(name=['meta','base','iframe','embed','object'])]
-    remove_attributes = ['clear']
+    #keep_only_tags = [dict(attrs={'id':'spArticleContent'})]
+    #remove_tags_after = dict(attrs={'id':'spArticleBody'})
+    #remove_tags = [dict(name=['meta','base','iframe','embed','object'])]
+    #remove_attributes = ['clear']
     feeds = [(u'Spiegel Online', u'http://www.spiegel.de/international/index.rss')]

-    def print_version(self, url):
-        main, sep, rest = url.rpartition(',')
-        rmain, rsep, rrest = main.rpartition(',')
-        return rmain + ',druck-' + rrest + ',' + rest
+    #def print_version(self, url):
+        #main, sep, rest = url.rpartition(',')
+        #rmain, rsep, rrest = main.rpartition(',')
+        #return rmain + ',druck-' + rrest + ',' + rest

-    def preprocess_html(self, soup):
-        for item in soup.findAll(style=True):
-            del item['style']
-        for item in soup.findAll('a'):
-            if item.string is not None:
-                str = item.string
-                item.replaceWith(str)
-            else:
-                str = self.tag_to_string(item)
-                item.replaceWith(str)
-        return soup
+    #def preprocess_html(self, soup):
+        #for item in soup.findAll(style=True):
+            #del item['style']
+        #for item in soup.findAll('a'):
+            #if item.string is not None:
+                #str = item.string
+                #item.replaceWith(str)
+            #else:
+                #str = self.tag_to_string(item)
+                #item.replaceWith(str)
+        #return soup
@@ -6,7 +6,6 @@ __copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
 spiegel.de
 '''

-from time import strftime
 from calibre.web.feeds.news import BasicNewsRecipe

 class Spiegel_ger(BasicNewsRecipe):
@@ -21,6 +20,8 @@ class Spiegel_ger(BasicNewsRecipe):
     lang = 'de-DE'
     no_stylesheets = True
     use_embedded_content = False
+    auto_cleanup = True
+    auto_cleanup_keep = '//*[@id="spArticleTopAsset"]'
     encoding = 'cp1252'

     conversion_options = {
@@ -31,20 +32,9 @@ class Spiegel_ger(BasicNewsRecipe):
         }

-    keep_only_tags = [dict(name='div', attrs={'id':'spArticleContent'})]
-
-    remove_tags = [dict(name=['object','link','base','iframe'])]
-
-    remove_tags_after = dict(name='div', attrs={'id':'spArticleBody'})
-
     feeds = [(u'Spiegel Online', u'http://www.spiegel.de/schlagzeilen/index.rss')]

-    def print_version(self, url):
-        rmt = url.rpartition('#')[0]
-        main, sep, rest = rmt.rpartition(',')
-        rmain, rsep, rrest = main.rpartition(',')
-        purl = rmain + ',druck-' + rrest + ',' + rest
-        return purl
-
-    def get_cover_url(self):
-        return 'http://wissen.spiegel.de/wissen/titel/SP/' + strftime("%Y/%W/%j/titel.jpg")
@@ -1,8 +1,9 @@
+
 __license__ = 'GPL v3'
-__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2012, mkydgr'
 '''
 www.wired.com
+based on the (broken) built-in recipe by Darko Miletic <darko.miletic at gmail.com>
 '''

 import re
@@ -11,11 +12,11 @@ from calibre.web.feeds.news import BasicNewsRecipe

 class Wired(BasicNewsRecipe):
     title = 'Wired Magazine'
-    __author__ = 'Darko Miletic'
-    description = 'Gaming news'
+    __author__ = 'mkydgr'
+    description = 'Technology News'
     publisher = 'Conde Nast Digital'
-    category = 'news, games, IT, gadgets'
-    oldest_article = 32
+    category = ''
+    oldest_article = 500
     delay = 1
     max_articles_per_feed = 100
     no_stylesheets = True
@@ -25,7 +26,8 @@ class Wired(BasicNewsRecipe):
     language = 'en'
     publication_type = 'magazine'
     extra_css = ' body{font-family: Arial,Verdana,sans-serif} .entryDescription li {display: inline; list-style-type: none} '
-    index = 'http://www.wired.com/magazine/'
+    index = 'http://www.wired.com/magazine'
+    departments = ['features','start','test','play','found', 'reviews']

     preprocess_regexps = [(re.compile(r'<meta name="Title".*<title>', re.DOTALL|re.IGNORECASE),lambda match: '<title>')]
     conversion_options = {
@@ -38,80 +40,53 @@ class Wired(BasicNewsRecipe):
     keep_only_tags = [dict(name='div', attrs={'class':'post'})]
     remove_tags_after = dict(name='div', attrs={'class':'tweetmeme_button'})
     remove_tags = [
-        dict(name=['object','embed','iframe','link','meta','base'])
+        dict(name=['object','embed','iframe','link'])
         ,dict(name='div', attrs={'class':['podcast_storyboard','tweetmeme_button']})
+        ,dict(attrs={'id':'ff_bottom_nav'})
+        ,dict(name='a',attrs={'href':'http://www.wired.com/app'})
         ]
-    remove_attributes = ['height','width','lang','border','clear']
+    remove_attributes = ['height','width']

     def parse_index(self):
         totalfeeds = []

         soup = self.index_to_soup(self.index)
-        majorf = soup.find('div',attrs={'class':'index'})
-        if majorf:
-            pfarticles = []
-            firsta = majorf.find(attrs={'class':'spread-header'})
-            if firsta:
-                pfarticles.append({
-                     'title'      :self.tag_to_string(firsta.a)
-                    ,'date'       :strftime(self.timefmt)
-                    ,'url'        :'http://www.wired.com' + firsta.a['href']
-                    ,'description':''
-                })
-            for itt in majorf.findAll('li'):
-                itema = itt.find('a',href=True)
-                if itema:
-                    pfarticles.append({
-                         'title'      :self.tag_to_string(itema)
-                        ,'date'       :strftime(self.timefmt)
-                        ,'url'        :'http://www.wired.com' + itema['href']
-                        ,'description':''
-                    })
-            totalfeeds.append(('Cover', pfarticles))
-        features = soup.find('div',attrs={'id':'my-glider'})
-        if features:
-            farticles = []
-            for item in features.findAll('div',attrs={'class':'section'}):
-                divurl = item.find('div',attrs={'class':'feature-header'})
-                if divurl:
-                    divdesc = item.find('div',attrs={'class':'feature-text'})
-                    url = divurl.a['href']
-                    if not divurl.a['href'].startswith('http://www.wired.com'):
-                        url = 'http://www.wired.com' + divurl.a['href']
-                    title = self.tag_to_string(divurl.a)
-                    description = self.tag_to_string(divdesc)
-                    date = strftime(self.timefmt)
-                    farticles.append({
-                         'title'      :title
-                        ,'date'       :date
-                        ,'url'        :url
-                        ,'description':description
-                    })
-            totalfeeds.append(('Featured Articles', farticles))
-
         #department feeds
-        departments = ['rants','start','test','play','found']
-        dept = soup.find('div',attrs={'id':'magazine-departments'})
-        if dept:
-            for ditem in departments:
+        depts = soup.find('div',attrs={'id':'department-posts'})
+
+        if depts:
+            for ditem in self.departments:
                 darticles = []
-                department = dept.find('div',attrs={'id':'department-'+ditem})
+                department = depts.find('h3',attrs={'id':'department-'+ditem})
                 if department:
-                    for item in department.findAll('div'):
-                        description = ''
-                        feed_link = item.find('a')
+                    #print '\n###### Found department %s ########'%(ditem)
+                    el = department.next
+                    while el and (el.__class__.__name__ == 'NavigableString' or el.name != 'h3'):
+                        if el.__class__.__name__ != 'NavigableString':
+                            #print '\t ... element',el.name
+                            if el.name == 'ul':
+                                for artitem in el.findAll('li'):
+                                    #print '\t\t ... article',repr(artitem)
+                                    feed_link = artitem.find('a')
+                                    #print '\t\t\t ... link',repr(feed_link)
                                     if feed_link and feed_link.has_key('href'):
-                                        url = feed_link['href']
+                                        url = self.makeurl(feed_link['href'])
                                         title = self.tag_to_string(feed_link)
                                         date = strftime(self.timefmt)
+                                        #print '\t\t ... found "%s" %s'%(title,url)
                                         darticles.append({
                                              'title'      :title
                                             ,'date'       :date
                                             ,'url'        :url
-                                            ,'description':description
+                                            ,'description':''
                                         })
+                                el = None
+                            else:
+                                el = el.next

                 totalfeeds.append((ditem.capitalize(), darticles))
         return totalfeeds
@@ -120,7 +95,7 @@ class Wired(BasicNewsRecipe):
         soup = self.index_to_soup(self.index)
         cover_item = soup.find('div',attrs={'class':'spread-image'})
         if cover_item:
-            cover_url = 'http://www.wired.com' + cover_item.a.img['src']
+            cover_url = self.makeurl(cover_item.a.img['src'])
         return cover_url

     def print_version(self, url):
@@ -129,17 +104,10 @@ class Wired(BasicNewsRecipe):
     def preprocess_html(self, soup):
         for item in soup.findAll(style=True):
             del item['style']
-        for item in soup.findAll('a'):
-            if item.string is not None:
-                tstr = item.string
-                item.replaceWith(tstr)
-            else:
-                item.name='span'
-                for atrs in ['href','target','alt','title','name','id']:
-                    if item.has_key(atrs):
-                        del item[atrs]
-        for item in soup.findAll('img'):
-            if not item.has_key('alt'):
-                item['alt'] = 'image'
         return soup

+    def makeurl(self, addr):
+        if addr[:4] != 'http' : addr='http://www.wired.com' + addr
+        while addr[-2:] == '//' : addr=addr[:-1]
+        return addr
session.vim (15 lines changed)
@@ -21,7 +21,10 @@ vipy.session.initialize(project_name='calibre', src_dir=src_dir,

 def recipe_title_callback(raw):
     try:
-        return eval(raw.decode('utf-8')).replace(u' ', u'_')
+        raw = eval(raw)
+        if isinstance(raw, bytes):
+            raw = raw.decode('utf-8')
+        return raw.replace(u' ', u'_')
     except:
         print ('Failed to decode recipe title: %r'%raw)
         raise
@@ -31,4 +34,12 @@ vipy.session.add_content_browser('<leader>r', 'Recipe',
     vipy.session.regexp_based_matcher(r'title\s*=\s*(?P<title>.+)', 'title', recipe_title_callback))
 EOFPY

-nmap \log :enew<CR>:read ! bzr log -l 500 <CR>:e Changelog.yaml<CR>:e src/calibre/constants.py<CR>
+fun! CalibreLog()
+    enew
+    read ! bzr log -l 500
+    set nomodifiable noswapfile buftype=nofile
+    edit Changelog.yaml
+    edit src/calibre/constants.py
+endfun
+
+nnoremap \log :call CalibreLog()<CR>
@@ -120,7 +120,7 @@ if iswindows:
     poppler_lib_dirs = consolidate('POPPLER_LIB_DIR', sw_lib_dir)
     popplerqt4_lib_dirs = poppler_lib_dirs
     poppler_libs = ['poppler']
-    magick_inc_dirs = [os.path.join(prefix, 'build', 'ImageMagick-6.6.6')]
+    magick_inc_dirs = [os.path.join(prefix, 'build', 'ImageMagick-6.7.6')]
     magick_lib_dirs = [os.path.join(magick_inc_dirs[0], 'VisualMagick', 'lib')]
     magick_libs = ['CORE_RL_wand_', 'CORE_RL_magick_']
    podofo_inc = os.path.join(sw_inc_dir, 'podofo')
@@ -41,8 +41,8 @@ binary_includes = [
     '/usr/lib/libgthread-2.0.so.0',
     '/usr/lib/libpng14.so.14',
     '/usr/lib/libexslt.so.0',
-    MAGICK_PREFIX+'/lib/libMagickWand.so.4',
-    MAGICK_PREFIX+'/lib/libMagickCore.so.4',
+    MAGICK_PREFIX+'/lib/libMagickWand.so.5',
+    MAGICK_PREFIX+'/lib/libMagickCore.so.5',
     '/usr/lib/libgcrypt.so.11',
     '/usr/lib/libgpg-error.so.0',
     '/usr/lib/libphonon.so.4',
@@ -429,7 +429,7 @@ class Py2App(object):
     def add_imagemagick(self):
         info('\nAdding ImageMagick')
         for x in ('Wand', 'Core'):
-            self.install_dylib(os.path.join(SW, 'lib', 'libMagick%s.4.dylib'%x))
+            self.install_dylib(os.path.join(SW, 'lib', 'libMagick%s.5.dylib'%x))
         idir = glob.glob(os.path.join(SW, 'lib', 'ImageMagick-*'))[-1]
         dest = os.path.join(self.frameworks_dir, 'ImageMagick')
         if os.path.exists(dest):
@@ -18,7 +18,7 @@ QT_DIR = 'Q:\\Qt\\4.8.1'
 QT_DLLS = ['Core', 'Gui', 'Network', 'Svg', 'WebKit', 'Xml', 'XmlPatterns']
 LIBUNRAR = 'C:\\Program Files\\UnrarDLL\\unrar.dll'
 SW = r'C:\cygwin\home\kovid\sw'
-IMAGEMAGICK = os.path.join(SW, 'build', 'ImageMagick-6.6.6',
+IMAGEMAGICK = os.path.join(SW, 'build', 'ImageMagick-6.7.6',
                            'VisualMagick', 'bin')
 CRT = r'C:\Microsoft.VC90.CRT'
|
@ -336,6 +336,8 @@ Index: src/PdfFiltersPrivate.cpp
|
||||
ImageMagick
|
||||
--------------
|
||||
|
||||
Get the source from: http://www.imagemagick.org/download/windows/ImageMagick-windows.zip
|
||||
|
||||
Edit VisualMagick/configure/configure.cpp to set
|
||||
|
||||
int projectType = MULTITHREADEDDLL;
|
||||
@ -349,7 +351,10 @@ Edit magick/magick-config.h
|
||||
Undefine ProvideDllMain and MAGICKCORE_X11_DELEGATE
|
||||
|
||||
Now open VisualMagick/VisualDynamicMT.sln set to Release
|
||||
Remove the CORE_xlib and UTIL_Imdisplay project CORE_Magick++
|
||||
Remove the CORE_xlib, UTIL_Imdisplay and CORE_Magick++ projects.
|
||||
|
||||
F7 for build project, you will get one error due to the removal of xlib, ignore
|
||||
it.
|
||||
|
||||
calibre
|
||||
---------
|
||||
|
setup/iso_639/is.po (1430 lines changed; diff suppressed because it is too large)
@@ -10,14 +10,14 @@ msgstr ""
 "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
 "devel@lists.alioth.debian.org>\n"
 "POT-Creation-Date: 2011-11-25 14:01+0000\n"
-"PO-Revision-Date: 2012-04-22 07:11+0000\n"
+"PO-Revision-Date: 2012-05-12 10:25+0000\n"
 "Last-Translator: kulkke <Unknown>\n"
 "Language-Team: Turkish <gnome-turk@gnome.org>\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2012-04-23 04:45+0000\n"
-"X-Generator: Launchpad (build 15135)\n"
+"X-Launchpad-Export-Date: 2012-05-13 04:43+0000\n"
+"X-Generator: Launchpad (build 15225)\n"
 "Language: tr\n"

 #. name for aaa
@@ -406,7 +406,7 @@ msgstr ""

 #. name for aed
 msgid "Argentine Sign Language"
-msgstr ""
+msgstr "Arjantin İşaret Dili"

 #. name for aee
 msgid "Pashayi; Northeast"
@@ -1554,7 +1554,7 @@ msgstr "Dano"

 #. name for asp
 msgid "Algerian Sign Language"
-msgstr ""
+msgstr "Cezayir İşaret Dili"

 #. name for asq
 msgid "Austrian Sign Language"
@@ -2578,7 +2578,7 @@ msgstr "Blafe"

 #. name for bfi
 msgid "British Sign Language"
-msgstr ""
+msgstr "Britanya İşaret Dili"

 #. name for bfj
 msgid "Bafanji"
@@ -4167,7 +4167,7 @@ msgstr "Bukat"

 #. name for bvl
 msgid "Bolivian Sign Language"
-msgstr ""
+msgstr "Bolivya İşaret Dili"

 #. name for bvm
 msgid "Bamunka"
@@ -4587,7 +4587,7 @@ msgstr "Biri"

 #. name for bzs
 msgid "Brazilian Sign Language"
-msgstr ""
+msgstr "Brezilya İşaret Dili"

 #. name for bzt
 msgid "Brithenig"
@@ -5623,11 +5623,11 @@ msgstr ""

 #. name for csf
 msgid "Cuba Sign Language"
-msgstr ""
+msgstr "Küba İşaret Dili"

 #. name for csg
 msgid "Chilean Sign Language"
-msgstr ""
+msgstr "Şili İşaret Dili"

 #. name for csh
 msgid "Chin; Asho"
@@ -5651,7 +5651,7 @@ msgstr ""

 #. name for csn
 msgid "Colombian Sign Language"
-msgstr ""
+msgstr "Kolombiya İşaret Dili"

 #. name for cso
 msgid "Chinantec; Sochiapan"
@@ -5663,7 +5663,7 @@ msgstr ""

 #. name for csr
 msgid "Costa Rican Sign Language"
-msgstr ""
+msgstr "Kosta Rika İşaret Dili"

 #. name for css
 msgid "Ohlone; Southern"
@@ -7347,7 +7347,7 @@ msgstr ""

 #. name for esl
 msgid "Egypt Sign Language"
-msgstr ""
+msgstr "Mısır İşaret Dili"

 #. name for esm
 msgid "Esuma"
@@ -7551,7 +7551,7 @@ msgstr ""

 #. name for fcs
 msgid "Quebec Sign Language"
-msgstr ""
+msgstr "Quebec İşaret Dili"

 #. name for fer
 msgid "Feroge"
@@ -8806,7 +8806,7 @@ msgstr ""

 #. name for gsm
 msgid "Guatemalan Sign Language"
-msgstr ""
+msgstr "Guatemala İşaret Dili"

 #. name for gsn
 msgid "Gusan"
@@ -10895,7 +10895,7 @@ msgstr ""

 #. name for jos
 msgid "Jordanian Sign Language"
-msgstr ""
+msgstr "Ürdün İşaret Dili"

 #. name for jow
 msgid "Jowulu"
@@ -13847,7 +13847,7 @@ msgstr ""

 #. name for lbs
 msgid "Libyan Sign Language"
-msgstr ""
+msgstr "Libya İşaret Dili"

 #. name for lbt
 msgid "Lachi"
@@ -15591,7 +15591,7 @@ msgstr ""

 #. name for mfs
 msgid "Mexican Sign Language"
-msgstr ""
+msgstr "Meksika İşaret Dili"

 #. name for mft
 msgid "Mokerang"
@@ -17055,7 +17055,7 @@ msgstr ""

 #. name for mul
 msgid "Multiple languages"
-msgstr ""
+msgstr "Çoklu diller"

 #. name for mum
 msgid "Maiwala"
@@ -17867,7 +17867,7 @@ msgstr ""

 #. name for ncs
 msgid "Nicaraguan Sign Language"
-msgstr ""
+msgstr "Nikaragua İşaret Dili"

 #. name for nct
 msgid "Naga; Chothe"
@@ -19495,7 +19495,7 @@ msgstr ""

 #. name for nzs
 msgid "New Zealand Sign Language"
-msgstr ""
+msgstr "Yeni Zelanda İşaret Dili"

 #. name for nzu
 msgid "Teke-Nzikou"
@@ -21219,7 +21219,7 @@ msgstr ""

 #. name for prl
 msgid "Peruvian Sign Language"
-msgstr ""
+msgstr "Peru İşaret Dili"

 #. name for prm
 msgid "Kibiri"
@@ -22699,7 +22699,7 @@ msgstr ""

 #. name for sdl
 msgid "Saudi Arabian Sign Language"
-msgstr ""
+msgstr "Suudi Arabistan İşaret Dili"

 #. name for sdm
 msgid "Semandang"
@@ -22847,7 +22847,7 @@ msgstr ""

 #. name for sfs
 msgid "South African Sign Language"
-msgstr ""
+msgstr "Güney Afrika İşaret Dili"

 #. name for sfw
 msgid "Sehwi"
@@ -25943,7 +25943,7 @@ msgstr ""

 #. name for tse
 msgid "Tunisian Sign Language"
-msgstr ""
+msgstr "Tunus İşaret Dili"

 #. name for tsf
 msgid "Tamang; Southwestern"
@@ -27348,7 +27348,7 @@ msgstr ""

 #. name for vsl
 msgid "Venezuelan Sign Language"
-msgstr ""
+msgstr "Venezuela İşaret Dili"

 #. name for vsv
 msgid "Valencian Sign Language"
@@ -28760,7 +28760,7 @@ msgstr ""

 #. name for xms
 msgid "Moroccan Sign Language"
-msgstr ""
+msgstr "Fas İşaret Dili"

 #. name for xmt
 msgid "Matbat"
@@ -29540,7 +29540,7 @@ msgstr ""

 #. name for yid
 msgid "Yiddish"
-msgstr "Yiddiş"
+msgstr "Yidiş"

 #. name for yif
 msgid "Ache"
@@ -4,7 +4,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__ = u'calibre'
-numeric_version = (0, 8, 51)
+numeric_version = (0, 8, 52)
 __version__ = u'.'.join(map(unicode, numeric_version))
 __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
@@ -302,7 +302,9 @@ class OutputFormatPlugin(Plugin):

         :param item: The item (HTML file) being processed
         :param stylizer: A Stylizer object containing the flattened styles for
-            item. You can get the style for any element by stylizer.style(element).
+            item. You can get the style for any element by
+            stylizer.style(element).

         '''
         pass
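To make the docstring concrete, here is a minimal sketch of a plugin hook consuming the Stylizer. The hook name and the subclass are assumptions for illustration; only stylizer.style(element) comes from the docstring above, and item.data is treated as the parsed lxml tree, as it is elsewhere in calibre's OEB code.

    # Hedged sketch, not calibre's actual code.
    from calibre.customize.conversion import OutputFormatPlugin

    class MyOutput(OutputFormatPlugin):

        def specialize_css(self, item, stylizer):  # hook name assumed
            # Walk every element of this HTML file and look at its
            # flattened (computed) style.
            for elem in item.data.iter('*'):
                style = stylizer.style(elem)
                # ... inspect or record `style` to specialize output here ...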
@@ -57,10 +57,11 @@ class PICO(NEWSMY):
     gui_name = 'Pico'
     description = _('Communicate with the Pico reader.')

-    VENDOR_NAME = ['TECLAST', 'IMAGIN', 'LASER-']
+    VENDOR_NAME = ['TECLAST', 'IMAGIN', 'LASER-', '']
     WINDOWS_MAIN_MEM = ['USBDISK__USER', 'EB720']
     EBOOK_DIR_MAIN = 'Books'
     FORMATS = ['EPUB', 'FB2', 'TXT', 'LRC', 'PDB', 'PDF', 'HTML', 'WTXT']
+    SCAN_FROM_ROOT = True

 class IPAPYRUS(TECLAST_K3):
@@ -207,7 +207,7 @@ class EPUBInput(InputFormatPlugin):
         if rc:
             cover_toc_item = None
             for item in oeb.toc.iterdescendants():
-                if item.href == rc:
+                if item.href and item.href.partition('#')[0] == rc:
                     cover_toc_item = item
                     break
             spine = {x.href for x in oeb.spine}
@@ -393,8 +393,14 @@ class EPUBOutput(OutputFormatPlugin):
         for tag in XPath('//h:body/descendant::h:script')(root):
             tag.getparent().remove(tag)

+        formchildren = XPath('./h:input|./h:button|./h:textarea|'
+                './h:label|./h:fieldset|./h:legend')
         for tag in XPath('//h:form')(root):
-            tag.getparent().remove(tag)
+            if formchildren(tag):
+                tag.getparent().remove(tag)
+            else:
+                # Not a real form
+                tag.tag = XHTML('div')

         for tag in XPath('//h:center')(root):
             tag.tag = XHTML('div')
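For illustration, the same keep-or-unwrap rule can be reproduced with plain lxml outside calibre. The XHTML namespace handling and the sample document are assumptions; calibre's own XPath helper binds the h: prefix internally.

    # Standalone sketch of the rule above, not calibre code.
    from lxml import etree

    XHTML_NS = 'http://www.w3.org/1999/xhtml'

    doc = etree.fromstring(
        '<html xmlns="%s"><body>'
        '<form><input/></form>'            # has a real control: removed
        '<form><p>layout only</p></form>'  # no controls: becomes a <div>
        '</body></html>' % XHTML_NS)

    formchildren = etree.XPath(
        './h:input|./h:button|./h:textarea|./h:label|./h:fieldset|./h:legend',
        namespaces={'h': XHTML_NS})

    for form in list(doc.iter('{%s}form' % XHTML_NS)):
        if formchildren(form):
            form.getparent().remove(form)    # a real form: drop it entirely
        else:
            form.tag = '{%s}div' % XHTML_NS  # keep content, drop the form-ness

    print(etree.tostring(doc))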
@@ -189,7 +189,7 @@ class MOBIFile(object):
     def read_tbs(self):
         from calibre.ebooks.mobi.writer8.tbs import (Entry, DOC,
             collect_indexing_data, encode_strands_as_sequences,
-            sequences_to_bytes)
+            sequences_to_bytes, calculate_all_tbs, NegativeStrandIndex)
         entry_map = []
         for index in self.ncx_index:
             vals = list(index)[:-1] + [None, None, None, None]
@@ -206,6 +206,14 @@ class MOBIFile(object):
             the start of the text record.

             ''')]

+        tbs_type = 8
+        try:
+            calculate_all_tbs(indexing_data)
+        except NegativeStrandIndex:
+            calculate_all_tbs(indexing_data, tbs_type=5)
+            tbs_type = 5
+
         for i, strands in enumerate(indexing_data):
             rec = self.text_records[i]
             tbs_bytes = rec.trailing_data.get('indexing', b'')
@@ -236,8 +244,12 @@ class MOBIFile(object):
                     desc.append('Sequence #%d: %r %r'%(j, seq[0], seq[1]))
                 if tbs_bytes:
                     desc.append('Remaining bytes: %s'%format_bytes(tbs_bytes))
-                calculated_sequences = encode_strands_as_sequences(strands)
-                calculated_bytes = sequences_to_bytes(calculated_sequences)
+                calculated_sequences = encode_strands_as_sequences(strands,
+                        tbs_type=tbs_type)
+                try:
+                    calculated_bytes = sequences_to_bytes(calculated_sequences)
+                except:
+                    calculated_bytes = b'failed to calculate tbs bytes'
                 if calculated_bytes != otbs:
                     print ('WARNING: TBS mismatch for record %d'%i)
                     desc.append('WARNING: TBS mismatch!')
@@ -71,6 +71,14 @@ def explode(path, dest, question=lambda x:True):
     return fork_job('calibre.ebooks.mobi.tweak', 'do_explode', args=(path,
         dest), no_output=True)['result']

+def set_cover(oeb):
+    if 'cover' not in oeb.guide or oeb.metadata['cover']: return
+    cover = oeb.guide['cover']
+    if cover.href in oeb.manifest.hrefs:
+        item = oeb.manifest.hrefs[cover.href]
+        oeb.metadata.clear('cover')
+        oeb.metadata.add('cover', item.id)
+
 def do_rebuild(opf, dest_path):
     plumber = Plumber(opf, dest_path, default_log)
     plumber.setup_options()
@@ -79,6 +87,7 @@ def do_rebuild(opf, dest_path):

     plumber.opts.mobi_passthrough = True
     oeb = create_oebbook(default_log, opf, plumber.opts)
+    set_cover(oeb)
     outp.convert(oeb, dest_path, inp, plumber.opts, default_log)

 def rebuild(src_dir, dest_path):
@@ -56,7 +56,7 @@ def build_exth(metadata, prefer_author_sort=False, is_periodical=False,
                     items][:1]
         else:
             creators = [unicode(c) for c in items]
-            items = ['; '.join(creators)]
+            items = creators
         for item in items:
             data = unicode(item)
             if term != 'description':
@@ -106,6 +106,9 @@ def collect_indexing_data(entries, text_record_lengths):

     return data

+class NegativeStrandIndex(Exception):
+    pass
+
 def encode_strands_as_sequences(strands, tbs_type=8):
     ''' Encode the list of strands for a single text record into a list of
     sequences, ready to be converted into TBS bytes. '''
@@ -144,10 +147,16 @@ def encode_strands_as_sequences(strands, tbs_type=8):
             index = entries[0].index - (entries[0].parent or 0)
             if ans and not strand_seqs:
                 # We are in the second or later strands, so we need to use a
-                # special flag and index value. The index value if the entry
+                # special flag and index value. The index value is the entry
                 # index - the index of the last entry in the previous strand.
-                extra[0b1000] = True
                 index = last_index - entries[0].index
+                if index < 0:
+                    if tbs_type == 5:
+                        index = -index
+                    else:
+                        raise NegativeStrandIndex()
+                else:
+                    extra[0b1000] = True
         last_index = entries[-1].index
         strand_seqs.append((index, extra))
@@ -167,20 +176,31 @@ def sequences_to_bytes(sequences):
     flag_size = 3
     for val, extra in sequences:
         ans.append(encode_tbs(val, extra, flag_size))
-        flag_size = 4 # only the first seuqence has flag size 3 as all
+        flag_size = 4 # only the first sequence has flag size 3 as all
                       # subsequent sequences could need the 0b1000 flag
     return b''.join(ans)

+def calculate_all_tbs(indexing_data, tbs_type=8):
+    rmap = {}
+    for i, strands in enumerate(indexing_data):
+        sequences = encode_strands_as_sequences(strands, tbs_type=tbs_type)
+        tbs_bytes = sequences_to_bytes(sequences)
+        rmap[i+1] = tbs_bytes
+    return rmap
+
 def apply_trailing_byte_sequences(index_table, records, text_record_lengths):
     entries = tuple(Entry(r['index'], r['offset'], r['length'], r['depth'],
         r.get('parent', None), r.get('first_child', None), r.get('last_child',
         None), r['label'], None, None, None, None) for r in index_table)

     indexing_data = collect_indexing_data(entries, text_record_lengths)
-    for i, strands in enumerate(indexing_data):
-        sequences = encode_strands_as_sequences(strands)
-        tbs_bytes = sequences_to_bytes(sequences)
-        records[i+1] += encode_trailing_data(tbs_bytes)
+    try:
+        rmap = calculate_all_tbs(indexing_data)
+    except NegativeStrandIndex:
+        rmap = calculate_all_tbs(indexing_data, tbs_type=5)
+
+    for i, tbs_bytes in rmap.iteritems():
+        records[i] += encode_trailing_data(tbs_bytes)

     return True
@@ -6,7 +6,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

-import os
+import os, re
 from calibre.utils.date import isoformat, now
 from calibre import guess_type
@@ -141,7 +141,7 @@ class MergeMetadata(object):
             item = self.oeb.manifest.hrefs[old_cover.href]
             if not cdata:
                 return item.id
-            self.oeb.manifest.remove(item)
+            self.remove_old_cover(item)
         elif not cdata:
             id = self.oeb.manifest.generate(id='cover')
             self.oeb.manifest.add(id, old_cover.href, 'image/jpeg')
@@ -152,3 +152,41 @@ class MergeMetadata(object):
             self.oeb.guide.add('cover', 'Cover', href)
         return id

+    def remove_old_cover(self, cover_item):
+        from calibre.ebooks.oeb.base import XPath
+        from lxml import etree
+
+        self.oeb.manifest.remove(cover_item)
+
+        # Remove any references to the cover in the HTML
+        affected_items = set()
+        for item in self.oeb.spine:
+            try:
+                images = XPath('//h:img[@src]')(item.data)
+            except:
+                images = []
+            removed = False
+            for img in images:
+                href = item.abshref(img.get('src'))
+                if href == cover_item.href:
+                    img.getparent().remove(img)
+                    removed = True
+            if removed:
+                affected_items.add(item)
+
+        # Check if the resulting HTML has no content, if so remove it
+        for item in affected_items:
+            body = XPath('//h:body')(item.data)
+            if body:
+                text = etree.tostring(body[0], method='text', encoding=unicode)
+            else:
+                text = ''
+            text = re.sub(r'\s+', '', text)
+            if not text and not XPath('//h:img|//svg:svg')(item.data):
+                self.log('Removing %s as it is a wrapper around'
+                        ' the cover image'%item.href)
+                self.oeb.spine.remove(item)
+                self.oeb.manifest.remove(item)
@@ -167,6 +167,8 @@ class EditorWidget(QWebView): # {{{
             self.action_remove_format.trigger()
             self.exec_command('delete')
             us.endMacro()
+        self.set_font_style()
+        self.setFocus(Qt.OtherFocusReason)

     def link_clicked(self, url):
         open_url(url)
@@ -266,6 +268,10 @@ class EditorWidget(QWebView): # {{{

         def fset(self, val):
             self.setHtml(val)
+            self.set_font_style()
+        return property(fget=fget, fset=fset)

+    def set_font_style(self):
         fi = QFontInfo(QApplication.font(self))
         f = fi.pixelSize() + 1 + int(tweaks['change_book_details_font_size_by'])
         fam = unicode(fi.family()).strip().replace('"', '')
@@ -278,8 +284,6 @@ class EditorWidget(QWebView): # {{{
         body.setAttribute('style', style)
         self.page().setContentEditable(True)

-        return property(fget=fget, fset=fset)
-
     def keyPressEvent(self, ev):
         if ev.key() in (Qt.Key_Tab, Qt.Key_Escape, Qt.Key_Backtab):
             ev.ignore()
@@ -349,7 +349,8 @@ class Text(Base):
         return d.exec_()

     def edit(self):
-        if self.getter() != self.initial_val:
+        if (self.getter() != self.initial_val and (self.getter() or
+            self.initial_val)):
             d = self._save_dialog(self.parent, _('Values changed'),
                 _('You have changed the values. In order to use this '
                   'editor, you must either discard or apply these '
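The extra truthiness check matters because an unset value and an empty string compare unequal even though neither represents a real edit. A small illustrative snippet (plain Python, not calibre code):

    # Show why `x != y` alone would trigger the "Values changed" warning
    # for two empty-ish values.
    initial_val, current = None, u''

    naive = current != initial_val
    # True -> the old check would warn, though nothing meaningful changed

    guarded = current != initial_val and bool(current or initial_val)
    # False -> the new check skips the warning when both values are empty-ish

    print(naive, guarded)  # True False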
@@ -69,6 +69,22 @@ If you have a hand edited TOC in the input document, you can use the TOC detection

 Finally, I encourage you to ditch the content TOC and only have a metadata TOC in your ebooks. Metadata TOCs will give the people reading your ebooks a much superior navigation experience (except on the Kindle, where they are essentially the same as a content TOC).

+The covers for my MOBI files have stopped showing up in Kindle for PC/Kindle for Android/etc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is caused by a bug in the Amazon software. You can work around it by going
+to Preferences->Output Options->MOBI output and setting the "Enable sharing
+of book content" option. If you are reconverting a previously converted book,
+you will also have to enable the option in the conversion dialog for that
+individual book (as per-book conversion settings are saved and take
+precedence).
+
+Note that doing this will mean that the generated MOBI will show up under
+personal documents instead of Books on the Kindle Fire and Amazon whispersync
+will not work, but the covers will. It's your choice which functionality is
+more important to you. I encourage you to contact Amazon and ask them to fix
+this bug.
+
 How do I convert a collection of HTML files in a specific order?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 In order to convert a collection of HTML files in a specific order, you have to create a table of contents file. That is, another HTML file that contains links to all the other files in the desired order. Such a file looks like::
@@ -245,7 +245,7 @@ The following functions are available in addition to those described in single-function mode:
 * ``current_library_name()`` -- return the last name on the path to the current calibre library. This function can be called in template program mode using the template ``{:'current_library_name()'}``.
 * ``days_between(date1, date2)`` -- return the number of days between ``date1`` and ``date2``. The number is positive if ``date1`` is greater than ``date2``, otherwise negative. If either ``date1`` or ``date2`` are not dates, the function returns the empty string.
 * ``divide(x, y)`` -- returns x / y. Throws an exception if either x or y are not numbers.
-* ``eval(string)`` -- evaluates the string as a program, passing the local variables (those ``assign`` ed to). This permits using the template processor to construct complex results from local variables. Because the `{` and `}` characters are special, you must use `[[` for the `{` character and `]]` for the `}` character; they are converted automatically. Note also that prefixes and suffixes (the "|prefix|suffix" syntax) cannot be used in the argument to this function when using template program mode.
+* ``eval(string)`` -- evaluates the string as a program, passing the local variables (those ``assign`` ed to). This permits using the template processor to construct complex results from local variables. Because the `{` and `}` characters are special, you must use `[[` for the `{` character and `]]` for the `}` character; they are converted automatically. Note also that prefixes and suffixes (the `|prefix|suffix` syntax) cannot be used in the argument to this function when using template program mode.
 * ``field(name)`` -- returns the metadata field named by ``name``.
 * ``first_non_empty(value, value, ...)`` -- returns the first value that is not empty. If all values are empty, then the empty value is returned. You can have as many values as you want.
 * ``format_date(x, date_format)`` -- format the value ``x``, which must be a date field, using the ``date_format``, returning a string. The formatting codes are::
@@ -306,7 +306,7 @@ The following functions are available in addition to those described in single-function mode:
 * ``substr(str, start, end)`` -- returns the ``start``'th through the ``end``'th characters of ``str``. The first character in ``str`` is the zero'th character. If end is negative, then it indicates that many characters counting from the right. If end is zero, then it indicates the last character. For example, ``substr('12345', 1, 0)`` returns ``'2345'``, and ``substr('12345', 1, -1)`` returns ``'234'``.
 * ``subtract(x, y)`` -- returns x - y. Throws an exception if either x or y are not numbers.
 * ``today()`` -- return a date string for today. This value is designed for use in format_date or days_between, but can be manipulated like any other string. The date is in ISO format.
-* ``template(x)`` -- evaluates x as a template. The evaluation is done in its own context, meaning that variables are not shared between the caller and the template evaluation. Because the `{` and `}` characters are special, you must use `[[` for the `{` character and `]]` for the `}` character; they are converted automatically. For example, ``template('[[title_sort]]')`` will evaluate the template ``{title_sort}`` and return its value. Note also that prefixes and suffixes (the "|prefix|suffix" syntax) cannot be used in the argument to this function when using template program mode.
+* ``template(x)`` -- evaluates x as a template. The evaluation is done in its own context, meaning that variables are not shared between the caller and the template evaluation. Because the `{` and `}` characters are special, you must use `[[` for the `{` character and `]]` for the `}` character; they are converted automatically. For example, ``template('[[title_sort]]')`` will evaluate the template ``{title_sort}`` and return its value. Note also that prefixes and suffixes (the `|prefix|suffix` syntax) cannot be used in the argument to this function when using template program mode.

 .. _template_functions_reference:
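As a rough executable model of the documented ``days_between`` semantics (plain Python, not calibre's implementation; the ISO date parsing and the integer result are assumptions):

    from datetime import datetime

    def days_between(date1, date2):
        """Positive when date1 > date2, negative otherwise; empty string
        when either argument is not a date (ISO format assumed)."""
        try:
            d1 = datetime.strptime(date1, '%Y-%m-%d')
            d2 = datetime.strptime(date2, '%Y-%m-%d')
        except (ValueError, TypeError):
            return ''
        return (d1 - d2).days

    print(days_between('2012-05-18', '2012-05-11'))  # 7
    print(days_between('not a date', '2012-05-11'))  # ''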
Many additional file diffs were suppressed because they are too large, and some files were not shown because too many files changed in this diff.