Sync to trunk.

This commit is contained in:
John Schember 2012-03-17 11:11:10 -04:00
commit 1bee8e1f00
282 changed files with 113341 additions and 66727 deletions

View File

@ -19,6 +19,189 @@
# new recipes: # new recipes:
# - title: # - title:
- version: 0.8.43
date: 2012-03-16
new features:
- title: "Template language: Speedup evaluation of general program mode templates by pre-compiling them to python. If you experience errors with this optimization, you can turn it off via Preferences->Tweaks. Also other miscellaneous optimizations in evaluating templates with composite columns."
- title: "MOBI Output: Add an option to not convert all images to JPEG when creating MOBI files. For maximum compatibility of the produced MOBI files, do not use this option."
tickets: [954025]
- title: "Add iPad3 Output Profile"
bug fixes:
- title: "KF8 Input: Add support for KF8 files with obfuscated embedded fonts"
tickets: [953260]
- title: "Make the stars in the book list a little larger on windows >= vista"
- title: "Revised periodical Section layout, for touchscreen devices resolving iBooks problem with tables spanning multiple pages"
- title: "Read dc:contributor metadata from MOBI files"
- title: "MOBI Output: Fix a regression that caused the generated thumbnail embedded in calibre produced MOBI files to be a large, low quality image instead of a small, high quality image. You would have been affected by this bug only if you directly used the output from calibre, without exporting it via send to device or save to disk."
tickets: [954254]
- title: "KF8 Input: Recognize OpenType embedded fonts as well."
tickets: [954728]
- title: "Fix regression in 0.8.41 that caused file:/// URLs to stop working in the news download system on windows."
tickets: [955581]
- title: "When setting metadata in MOBI files fix cover not being updated if the mobi file has its first image record as the cover"
- title: "Fix column coloring rules based on the size column not working"
tickets: [953737]
improved recipes:
- Microwaves and RF
- idg.se
new recipes:
- title: SatMagazine
author: kiavash
- version: 0.8.42
date: 2012-03-12
new features:
- title: "Support for reading Amazon's new KF8 format"
type: major
description: "calibre can now both view and convert MOBI files that contain Amazon's new KF8 (Kindle Fire) format"
- title: "Add a tweak to Preferences->Tweaks to control the font size used in the book details panel"
tickets: [948357]
- title: "Allow specifying a list of file types to exclude when automatically adding files from a folder"
tickets: [943025]
- title: "Show ratings in the book details panel as stars. Also allow the user to change the alignment of the ratings column in the main books list. No longer display the stars in blue, instead their color can be customized via the column coloring rules, like any other column"
- title: "When setting metadata in EPUB ensure that the <meta name=cover> tag has its name attribute first. Needed for the Nook."
- title: "Drivers for Novo 7, LG G2x and Zenithink T-280"
tickets: [941671, 940625, 940527]
- title: "Update linux binaries to Qt 4.8.0"
bug fixes:
- title: "Fix some rar files causing crashes on OS X (updated libunrar.dylib in the OS X build)"
tickets: [951185]
- title: "MOBI Output: Ignore the Table of Contents pointed to by the guide, if it contains no links"
- title: "ODT Input: Ignore margin declaration in ODT styles if more specific margin-* declarations are present"
tickets: [941134]
- title: "Conversion pipeline: Fix @import rules in CSS stylesheets that have comments on their first few lines being ignored."
- title: "EPUB Input: When extracting the contents of epub files on windows, do not error out if one or more of the components in the epub file have filepaths containing characters that are invalid for the windows filesystem, instead, just replace those characters, since those entries are likely to be errors in the zip container anyway."
tickets: [950081]
- title: "Textile output: Fix issue with blockquotes and sentences getting removed."
- title: "MOBI Output: When using the prefer author sort conversion option, handle multiple authors better."
tickets: [947146]
- title: "Fix regression in 0.8.41 that broke direct connection to iDevices in windows"
tickets: [944534]
- title: "Fix the download bulk metadata completed popup causing a crash if the Esc key is pressed."
tickets: [943056]
- title: "Fix rating values doubled in CSV/XML catalogs"
tickets: [942790]
- title: "EPUB Input: Remove non markup documents from the spine automatically, instead of erroring out"
- title: "When formatting ratings in templates, etc., do not have an unnecessary .0"
- title: "Calibre portable: Do not allow calibre portable to run if it is placed in a location whose path is too long. Also hide the library location setup in the welcome wizard when running the portable build."
- title: "Fix regression in 0.8.41 that broke calibre if the TMP or TEMP environment variable is set to the root of a drive."
tickets: [952284]
- title: "Fix display of ratings type custom fields in the content server"
tickets: [940600]
improved recipes:
- La Jornada
- Chicago Tribune
- Mediapart
- rue89
new recipes:
- title: Racjonalista
author: Racjonlista
- title: JAPAA
author: adoucette
- version: 0.8.41
date: 2012-02-24
new features:
- title: "Driver for Sony Experia Play 4G"
tickets: [938831]
- title: "News download system: Allow use of __future__ in recipes, and do not change line numbers of code in the recipe when compiling it"
- title: "Use the My Documents folder as the default location for the Calibre Library folder on first start in windows"
tickets: [934840]
- title: "Add a tweak to Preferences->Tweaks to control the order in which categories appear in the Tag Browser"
- title: "Tag Browser: Add an entry to the right click menu to quickly delete tags"
tickets: [934509]
- title: "Amazon metadata download: Try to scrape series information from the amazon details page. Note that currently very few books have series info available. Often the page for hardcover will have series, but the Kindle edition will not. In such cases calibre may or may not find the series, depending on which page it ends up using."
- title: "Content server: Add favicon to OPDS feeds."
tickets: [934731]
bug fixes:
- title: "RTF Input: Fix some WMF images embedded in RTF files being distorted on conversion."
tickets: [934167]
- title: "Fix long standing bug preventing calibre from working on east asian windows installs when the user name in windows has non-ascii characters"
tickets: [937389]
- title: "Get Books: Fix Baen Webscription and O'Reilly stores. Fix price detection for Google Books"
- title: "MOBI Output: When the same anchor is present more than once in the input document, use the first occurrence rather than the last one."
tickets: [934031]
- title: "Use the 'default cover font' tweak when generating default masthead images as well"
tickets: [939256]
- title: "Fix content server does not correctly display custom field of type 'rating'"
tickets: [938303]
- title: "Fix welcome wizard does not save send-from email info unless send-to field is filled"
tickets: [937087]
- title: "When reading metadata from odt files, use initial-creator in preference to creator for setting the author field"
tickets: [934564]
- title: "Fix conversion erroring out when the input document has very long and thin images"
tickets: [935234]
improved recipes:
- The Sun
- Various Polish news sources
- Mediapart
new recipes:
- title: La pausa caffe
author: faber1971
- title: Various Polish news sources
author: fenuks
- version: 0.8.40 - version: 0.8.40
date: 2012-02-17 date: 2012-02-17

152
imgsrc/calibreSymbols.spd Normal file
View File

@ -0,0 +1,152 @@
SplineFontDB: 3.0
FontName: calibreSymbols
FullName: calibre Symbols
FamilyName: calibre Symbols
Weight: Medium
Copyright: Created by Kovid Goyal with FontForge 2.0 (http://fontforge.sf.net)
UComments: "2012-2-27: Created."
Version: 001.000
ItalicAngle: 0
UnderlinePosition: -100
UnderlineWidth: 50
Ascent: 800
Descent: 200
LayerCount: 2
Layer: 0 0 "Back" 1
Layer: 1 0 "Fore" 0
NeedsXUIDChange: 1
XUID: [1021 913 325894820 11538708]
FSType: 0
OS2Version: 0
OS2_WeightWidthSlopeOnly: 0
OS2_UseTypoMetrics: 1
CreationTime: 1330331997
ModificationTime: 1330487767
OS2TypoAscent: 0
OS2TypoAOffset: 1
OS2TypoDescent: 0
OS2TypoDOffset: 1
OS2TypoLinegap: 90
OS2WinAscent: 0
OS2WinAOffset: 1
OS2WinDescent: 0
OS2WinDOffset: 1
HheadAscent: 0
HheadAOffset: 1
HheadDescent: 0
HheadDOffset: 1
MarkAttachClasses: 1
DEI: 91125
Encoding: UnicodeFull
UnicodeInterp: none
NameList: Adobe Glyph List
DisplaySize: -24
AntiAlias: 1
FitToEm: 1
WidthSeparation: 150
WinInfo: 9600 75 22
BeginPrivate: 0
EndPrivate
BeginChars: 1114112 3
StartChar: uni2605
Encoding: 9733 9733 0
Width: 979
VWidth: -26
Flags: W
LayerCount: 2
Fore
SplineSet
551.923 352.862 m 1
749.497 369.592 l 2
804.954 374.123 833.379 376.389 834.765 376.389 c 0
852.095 376.389 860.761 368.896 860.761 353.907 c 0
860.761 347.981 859.028 343.363 855.562 340.052 c 0
852.095 336.74 825.578 319.225 776.012 287.506 c 2
609.635 180.323 l 1
716.22 -88.417 l 2
717.606 -91.2051 718.301 -95.3877 718.301 -100.965 c 0
718.301 -106.193 716.394 -110.725 712.58 -114.558 c 0
708.769 -118.393 704.608 -120.31 700.104 -120.31 c 0
695.943 -120.31 691.61 -118.828 687.103 -115.866 c 0
682.598 -112.902 658.162 -92.251 613.795 -53.9082 c 2
466.134 74.71 l 1
320.554 -51.8184 l 2
274.802 -91.5547 249.758 -112.902 245.426 -115.866 c 0
241.092 -118.828 236.846 -120.31 232.688 -120.31 c 0
227.835 -120.31 223.415 -118.306 219.429 -114.297 c 0
215.442 -110.289 213.449 -105.844 213.449 -100.965 c 0
213.449 -97.8281 223.329 -71.3379 243.087 -21.4932 c 2
322.115 180.323 l 1
152.618 289.598 l 2
104.783 320.271 79.2217 337.176 75.9297 340.313 c 0
72.6357 343.45 70.9893 347.981 70.9893 353.907 c 0
70.9893 369.243 79.8291 376.912 97.5059 376.912 c 0
98.8926 376.912 123.155 374.82 170.296 370.638 c 2
379.825 352.862 l 1
427.14 555.201 l 2
439.271 607.834 446.811 636.764 449.757 641.992 c 0
452.702 647.221 458.162 649.834 466.134 649.834 c 0
474.454 649.834 480 646.96 482.772 641.208 c 0
485.545 635.457 493.518 604.173 506.689 547.357 c 2
551.923 352.862 l 1
EndSplineSet
Validated: 524289
EndChar
StartChar: zero
Encoding: 48 48 1
Width: 1303
VWidth: 2048
Flags: W
HStem: -43.3789 76.7998<582.097 721.09> 623.341 76.7998<582.097 721.091>
VStem: 403.82 97.4395<148.044 508.66> 802.221 96.959<148.044 508.659>
LayerCount: 2
Fore
SplineSet
651.5 623.341 m 0
601.58 623.341 564.061 598.78 538.939 549.66 c 0
513.82 500.541 501.26 426.7 501.26 328.141 c 0
501.26 229.9 513.82 156.221 538.939 107.101 c 0
564.061 57.9805 601.58 33.4209 651.5 33.4209 c 0
701.74 33.4209 739.42 57.9805 764.54 107.101 c 0
789.66 156.221 802.221 229.9 802.221 328.141 c 0
802.221 426.7 789.66 500.541 764.54 549.66 c 0
739.42 598.78 701.74 623.341 651.5 623.341 c 0
651.5 700.141 m 0
731.82 700.141 793.18 668.38 835.58 604.859 c 0
877.979 541.341 899.18 449.101 899.18 328.141 c 0
899.18 207.5 877.979 115.421 835.58 51.9004 c 0
793.18 -11.6201 731.819 -43.3789 651.5 -43.3789 c 0
571.18 -43.3789 509.82 -11.6201 467.42 51.9004 c 0
425.021 115.421 403.82 207.5 403.82 328.141 c 0
403.82 449.101 425.021 541.341 467.42 604.859 c 0
509.82 668.38 571.18 700.141 651.5 700.141 c 0
EndSplineSet
Validated: 1
EndChar
StartChar: period
Encoding: 46 46 2
Width: 516
VWidth: 2048
Flags: W
HStem: 53.4004 166.199<203.263 309.297>
VStem: 174.6 163.801<82.9501 190.955>
LayerCount: 2
Fore
SplineSet
338.4 142.8 m 0
338.4 119.2 330.5 98.4004 314.7 80.4004 c 0
298.9 62.4004 277 53.4004 249 53.4004 c 0
225.4 53.4004 207.1 61.2002 194.1 76.7998 c 0
181.1 92.4004 174.6 111 174.6 132.6 c 0
174.6 155.8 182.6 176.1 198.6 193.5 c 0
214.6 210.9 236.8 219.6 265.2 219.6 c 0
288.8 219.6 306.9 212.2 319.5 197.4 c 0
332.1 182.6 338.4 164.4 338.4 142.8 c 0
EndSplineSet
Validated: 1
EndChar
EndChars
EndSplineFont

View File

@ -7,6 +7,7 @@ class Archeowiesci(BasicNewsRecipe):
language = 'pl' language = 'pl'
cover_url='http://archeowiesci.pl/wp-content/uploads/2011/05/Archeowiesci2-115x115.jpg' cover_url='http://archeowiesci.pl/wp-content/uploads/2011/05/Archeowiesci2-115x115.jpg'
oldest_article = 7 oldest_article = 7
needs_subscription='optional'
max_articles_per_feed = 100 max_articles_per_feed = 100
auto_cleanup = True auto_cleanup = True
remove_tags=[dict(name='span', attrs={'class':['post-ratings', 'post-ratings-loading']})] remove_tags=[dict(name='span', attrs={'class':['post-ratings', 'post-ratings-loading']})]
@ -16,6 +17,16 @@ class Archeowiesci(BasicNewsRecipe):
feeds = BasicNewsRecipe.parse_feeds(self) feeds = BasicNewsRecipe.parse_feeds(self)
for feed in feeds: for feed in feeds:
for article in feed.articles[:]: for article in feed.articles[:]:
if 'subskrypcja' in article.title: if self.username is None and 'subskrypcja' in article.title:
feed.articles.remove(article) feed.articles.remove(article)
return feeds return feeds
def get_browser(self):
br = BasicNewsRecipe.get_browser()
if self.username is not None and self.password is not None:
br.open('http://archeowiesci.pl/wp-login.php')
br.select_form(name='loginform')
br['log'] = self.username
br['pwd'] = self.password
br.submit()
return br

View File

@ -1,15 +1,18 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class Astronomia_pl(BasicNewsRecipe): class Astronomia_pl(BasicNewsRecipe):
title = u'Astronomia.pl' title = u'Astronomia.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'Astronomia - polish astronomy site' description = 'Astronomia - polish astronomy site'
masthead_url = 'http://www.astronomia.pl/grafika/logo.gif'
cover_url = 'http://www.astronomia.pl/grafika/logo.gif' cover_url = 'http://www.astronomia.pl/grafika/logo.gif'
category = 'astronomy, science' category = 'astronomy, science'
language = 'pl' language = 'pl'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
#no_stylesheets=True extra_css='#h2 {font-size: 18px;}'
no_stylesheets=True
preprocess_regexps = [(re.compile(ur'<b>Przeczytaj także:.*?</BODY>', re.DOTALL), lambda match: '</BODY>') ]
remove_tags_before=dict(name='div', attrs={'id':'a1'}) remove_tags_before=dict(name='div', attrs={'id':'a1'})
keep_only_tags=[dict(name='div', attrs={'id':['a1', 'h2']})] keep_only_tags=[dict(name='div', attrs={'id':['a1', 'h2']})]
feeds = [(u'Wiadomości z astronomii i astronautyki', u'http://www.astronomia.pl/rss/')] feeds = [(u'Wiadomości z astronomii i astronautyki', u'http://www.astronomia.pl/rss/')]

View File

@ -4,16 +4,17 @@ class Benchmark_pl(BasicNewsRecipe):
title = u'Benchmark.pl' title = u'Benchmark.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'benchmark.pl -IT site' description = u'benchmark.pl -IT site'
masthead_url = 'http://www.benchmark.pl/i/logo-footer.png'
cover_url = 'http://www.ieaddons.pl/benchmark/logo_benchmark_new.gif' cover_url = 'http://www.ieaddons.pl/benchmark/logo_benchmark_new.gif'
category = 'IT' category = 'IT'
language = 'pl' language = 'pl'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets=True no_stylesheets=True
preprocess_regexps = [(re.compile(ur'\bWięcej o .*</body>', re.DOTALL|re.IGNORECASE), lambda match: '</body>')] preprocess_regexps = [(re.compile(ur'<h3><span style="font-size: small;">&nbsp;Zobacz poprzednie <a href="http://www.benchmark.pl/news/zestawienie/grupa_id/135">Opinie dnia:</a></span>.*</body>', re.DOTALL|re.IGNORECASE), lambda match: '</body>'), (re.compile(ur'Więcej o .*?</ul>', re.DOTALL|re.IGNORECASE), lambda match: '')]
keep_only_tags=[dict(name='div', attrs={'class':['m_zwykly', 'gallery']})] keep_only_tags=[dict(name='div', attrs={'class':['m_zwykly', 'gallery']})]
remove_tags_after=dict(name='div', attrs={'class':'body'}) remove_tags_after=dict(name='div', attrs={'class':'body'})
remove_tags=[dict(name='div', attrs={'class':['kategoria', 'socialize', 'thumb', 'panelOcenaObserwowane', 'categoryNextToSocializeGallery']})] remove_tags=[dict(name='div', attrs={'class':['kategoria', 'socialize', 'thumb', 'panelOcenaObserwowane', 'categoryNextToSocializeGallery']}), dict(name='table', attrs={'background':'http://www.benchmark.pl/uploads/backend_img/a/fotki_newsy/opinie_dnia/bg.png'}), dict(name='table', attrs={'width':'210', 'cellspacing':'1', 'cellpadding':'4', 'border':'0', 'align':'right'})]
INDEX= 'http://www.benchmark.pl' INDEX= 'http://www.benchmark.pl'
feeds = [(u'Aktualności', u'http://www.benchmark.pl/rss/aktualnosci-pliki.xml'), feeds = [(u'Aktualności', u'http://www.benchmark.pl/rss/aktualnosci-pliki.xml'),
(u'Testy i recenzje', u'http://www.benchmark.pl/rss/testy-recenzje-minirecenzje.xml')] (u'Testy i recenzje', u'http://www.benchmark.pl/rss/testy-recenzje-minirecenzje.xml')]

View File

@ -10,10 +10,11 @@ class Biolog_pl(BasicNewsRecipe):
description = u'Przyrodnicze aktualności ze świata nauki (codziennie aktualizowane), kurs biologii, testy i sprawdziany, forum dyskusyjne.' description = u'Przyrodnicze aktualności ze świata nauki (codziennie aktualizowane), kurs biologii, testy i sprawdziany, forum dyskusyjne.'
category = 'biology' category = 'biology'
language = 'pl' language = 'pl'
masthead_url= 'http://www.biolog.pl/naukowy,portal,biolog.png'
cover_url='http://www.biolog.pl/naukowy,portal,biolog.png' cover_url='http://www.biolog.pl/naukowy,portal,biolog.png'
no_stylesheets = True no_stylesheets = True
#keeps_only_tags=[dict(id='main')] #keeps_only_tags=[dict(id='main')]
remove_tags_before=dict(id='main') remove_tags_before=dict(id='main')
remove_tags_after=dict(name='a', attrs={'name':'komentarze'}) remove_tags_after=dict(name='a', attrs={'name':'komentarze'})
remove_tags=[dict(name='img', attrs={'alt':'Komentarze'})] remove_tags=[dict(name='img', attrs={'alt':'Komentarze'}), dict(name='span', attrs={'class':'menu_odsylacze'})]
feeds = [(u'Wszystkie', u'http://www.biolog.pl/backend.php'), (u'Medycyna', u'http://www.biolog.pl/medycyna-rss.php'), (u'Ekologia', u'http://www.biolog.pl/rss-ekologia.php'), (u'Genetyka i biotechnologia', u'http://www.biolog.pl/rss-biotechnologia.php'), (u'Botanika', u'http://www.biolog.pl/rss-botanika.php'), (u'Le\u015bnictwo', u'http://www.biolog.pl/rss-lesnictwo.php'), (u'Zoologia', u'http://www.biolog.pl/rss-zoologia.php')] feeds = [(u'Wszystkie', u'http://www.biolog.pl/backend.php'), (u'Medycyna', u'http://www.biolog.pl/medycyna-rss.php'), (u'Ekologia', u'http://www.biolog.pl/rss-ekologia.php'), (u'Genetyka i biotechnologia', u'http://www.biolog.pl/rss-biotechnologia.php'), (u'Botanika', u'http://www.biolog.pl/rss-botanika.php'), (u'Le\u015bnictwo', u'http://www.biolog.pl/rss-lesnictwo.php'), (u'Zoologia', u'http://www.biolog.pl/rss-zoologia.php')]

View File

@ -1,16 +1,20 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class CD_Action(BasicNewsRecipe): class CD_Action(BasicNewsRecipe):
title = u'CD-Action' title = u'CD-Action'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'cdaction.pl - polish magazine about games site' description = 'cdaction.pl - polish games magazine site'
category = 'games' category = 'games'
language = 'pl' language = 'pl'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets= True no_stylesheets= True
cover_url =u'http://s.cdaction.pl/obrazki/logo-CD-Action_172k9.JPG'
keep_only_tags= dict(id='news_content') keep_only_tags= dict(id='news_content')
remove_tags_after= dict(name='div', attrs={'class':'tresc'}) remove_tags_after= dict(name='div', attrs={'class':'tresc'})
feeds = [(u'Newsy', u'http://www.cdaction.pl/rss_newsy.xml')] feeds = [(u'Newsy', u'http://www.cdaction.pl/rss_newsy.xml')]
def get_cover_url(self):
soup = self.index_to_soup('http://www.cdaction.pl/magazyn/')
self.cover_url='http://www.cdaction.pl'+ soup.find(id='wspolnik').div.a['href']
return getattr(self, 'cover_url', self.cover_url)

View File

@ -5,6 +5,7 @@ class CGM(BasicNewsRecipe):
oldest_article = 7 oldest_article = 7
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'Codzienna Gazeta Muzyczna' description = u'Codzienna Gazeta Muzyczna'
masthead_url='http://www.cgm.pl/img/header/logo.gif'
cover_url = 'http://www.krafcy.com/foto/tinymce/Image/cgm%281%29.jpg' cover_url = 'http://www.krafcy.com/foto/tinymce/Image/cgm%281%29.jpg'
category = 'music' category = 'music'
language = 'pl' language = 'pl'
@ -23,21 +24,19 @@ class CGM(BasicNewsRecipe):
def preprocess_html(self, soup): def preprocess_html(self, soup):
gallery=soup.find('div', attrs={'class':'galleryFlash'})
if gallery:
img=gallery.div
gallery.img.extract()
if img:
img=img['style']
img='http://www.cgm.pl'+img[img.find('url(')+4:img.find(')')]
gallery.contents[1].name='img'
gallery.contents[1]['src']=img
for item in soup.findAll(style=True): for item in soup.findAll(style=True):
del item['style'] del item['style']
ad=soup.findAll('a') ad=soup.findAll('a')
for r in ad: for r in ad:
if 'http://www.hustla.pl' in r['href'] or 'http://www.ebilet.pl' in r['href']: if 'www.hustla.pl' in r['href'] or 'www.ebilet.pl' in r['href']:
r.extract() r.extract()
gallery=soup.find('div', attrs={'class':'galleryFlash'})
if gallery:
img=gallery.find('embed')
if img:
img=img['src'][35:]
img='http://www.cgm.pl/_vault/_gallery/_photo/'+img
param=gallery.findAll(name='param')
for i in param:
i.extract()
gallery.contents[1].name='img'
gallery.contents[1]['src']=img
return soup return soup

View File

@ -3,6 +3,7 @@ __license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
import urllib, re
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class ChicagoTribune(BasicNewsRecipe): class ChicagoTribune(BasicNewsRecipe):
@ -77,10 +78,17 @@ class ChicagoTribune(BasicNewsRecipe):
def get_article_url(self, article): def get_article_url(self, article):
url = article.get('feedburner_origlink', article.get('guid', article.get('link'))) ans = None
if url.endswith('?track=rss'): try:
url = url.partition('?')[0] s = article.summary
return url ans = urllib.unquote(
re.search(r'href=".+?bookmark.cfm.+?link=(.+?)"', s).group(1))
except:
pass
if ans is None:
ans = article.get('feedburner_origlink', article.get('guid', article.get('link')))
if ans is not None:
return ans.replace('?track=rss', '')
def skip_ad_pages(self, soup): def skip_ad_pages(self, soup):
text = soup.find(text='click here to continue to article') text = soup.find(text='click here to continue to article')

View File

@ -33,6 +33,32 @@ class ChristianScienceMonitor(BasicNewsRecipe):
remove_javascript = True remove_javascript = True
no_stylesheets = True no_stylesheets = True
requires_version = (0, 8, 39)
def preprocess_raw_html(self, raw, url):
try:
from html5lib import parse
root = parse(raw, namespaceHTMLElements=False,
treebuilder='lxml').getroot()
from lxml import etree
for tag in root.xpath(
'//script|//style|//noscript|//meta|//link|//object'):
tag.getparent().remove(tag)
for elem in list(root.iterdescendants(tag=etree.Comment)):
elem.getparent().remove(elem)
ans = etree.tostring(root, encoding=unicode)
ans = re.sub('.*<html', '<html', ans, flags=re.DOTALL)
return ans
except:
import traceback
traceback.print_exc()
raise
def index_to_soup(self, url):
raw = BasicNewsRecipe.index_to_soup(self, url,
raw=True).decode('utf-8')
raw = self.preprocess_raw_html(raw, url)
return BasicNewsRecipe.index_to_soup(self, raw)
def append_page(self, soup, appendtag, position): def append_page(self, soup, appendtag, position):
nav = soup.find('div',attrs={'class':'navigation'}) nav = soup.find('div',attrs={'class':'navigation'})
@ -78,14 +104,6 @@ class ChristianScienceMonitor(BasicNewsRecipe):
print_soup = soup print_soup = soup
return print_soup return print_soup
preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE | re.DOTALL), i[1]) for i in
[
(r'<!--.*?-->', lambda match : ''),
(r'<body.*?<div id="story"', lambda match : '<body><div id="story"'),
(r'<div class="pubdate">.*?</div>', lambda m: ''),
(r'Full HTML version of this story which may include photos, graphics, and related links.*</body>',
lambda match : '</body>'),
]]
extra_css = ''' extra_css = '''
h1{ color:#000000;font-family: Georgia,Times,"Times New Roman",serif; font-size: large} h1{ color:#000000;font-family: Georgia,Times,"Times New Roman",serif; font-size: large}
.sub{ color:#000000;font-family: Georgia,Times,"Times New Roman",serif; font-size: small;} .sub{ color:#000000;font-family: Georgia,Times,"Times New Roman",serif; font-size: small;}

View File

@ -0,0 +1,48 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class Ciekawostki_Historyczne(BasicNewsRecipe):
title = u'Ciekawostki Historyczne'
oldest_article = 7
__author__ = 'fenuks'
description = u'Serwis popularnonaukowy - odkrycia, kontrowersje, historia, ciekawostki, badania, ciekawostki z przeszłości.'
category = 'history'
language = 'pl'
masthead_url= 'http://ciekawostkihistoryczne.pl/wp-content/themes/Wordpress_Magazine/images/logo-ciekawostki-historyczne-male.jpg'
cover_url='http://ciekawostkihistoryczne.pl/wp-content/themes/Wordpress_Magazine/images/logo-ciekawostki-historyczne-male.jpg'
max_articles_per_feed = 100
preprocess_regexps = [(re.compile(ur'Ten artykuł ma kilka stron.*?</fb:like>', re.DOTALL), lambda match: ''), (re.compile(ur'<h2>Zobacz też:</h2>.*?</ol>', re.DOTALL), lambda match: '')]
no_stylesheets=True
remove_empty_feeds=True
keep_only_tags=[dict(name='div', attrs={'class':'post'})]
remove_tags=[dict(id='singlepostinfo')]
feeds = [(u'Staro\u017cytno\u015b\u0107', u'http://ciekawostkihistoryczne.pl/tag/starozytnosc/feed/'), (u'\u015aredniowiecze', u'http://ciekawostkihistoryczne.pl/tag/sredniowiecze/feed/'), (u'Nowo\u017cytno\u015b\u0107', u'http://ciekawostkihistoryczne.pl/tag/nowozytnosc/feed/'), (u'XIX wiek', u'http://ciekawostkihistoryczne.pl/tag/xix-wiek/feed/'), (u'1914-1939', u'http://ciekawostkihistoryczne.pl/tag/1914-1939/feed/'), (u'1939-1945', u'http://ciekawostkihistoryczne.pl/tag/1939-1945/feed/'), (u'Powojnie (od 1945)', u'http://ciekawostkihistoryczne.pl/tag/powojnie/feed/'), (u'Recenzje', u'http://ciekawostkihistoryczne.pl/category/recenzje/feed/')]
def append_page(self, soup, appendtag):
tag=soup.find(name='h7')
if tag:
if tag.br:
pass
elif tag.nextSibling.name=='p':
tag=tag.nextSibling
nexturl = tag.findAll('a')
for nextpage in nexturl:
tag.extract()
nextpage= nextpage['href']
soup2 = self.index_to_soup(nextpage)
pagetext = soup2.find(name='div', attrs={'class':'post'})
for r in pagetext.findAll('div', attrs={'id':'singlepostinfo'}):
r.extract()
for r in pagetext.findAll('div', attrs={'class':'wp-caption alignright'}):
r.extract()
for r in pagetext.findAll('h1'):
r.extract()
pagetext.find('h6').nextSibling.extract()
pagetext.find('h7').nextSibling.extract()
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
def preprocess_html(self, soup):
self.append_page(soup, soup.body)
return soup

View File

@ -7,10 +7,11 @@ class Computerworld_pl(BasicNewsRecipe):
description = u'Serwis o IT w przemyśle, finansach, handlu, administracji oraz rynku IT i telekomunikacyjnym - wiadomości, opinie, analizy, porady prawne' description = u'Serwis o IT w przemyśle, finansach, handlu, administracji oraz rynku IT i telekomunikacyjnym - wiadomości, opinie, analizy, porady prawne'
category = 'IT' category = 'IT'
language = 'pl' language = 'pl'
masthead_url= 'http://g1.computerworld.pl/cw/beta_gfx/cw2.gif'
no_stylesheets=True no_stylesheets=True
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 100
keep_only_tags=[dict(name='div', attrs={'id':'s'})] keep_only_tags=[dict(attrs={'class':['tyt_news', 'prawo', 'autor', 'tresc']})]
remove_tags_after=dict(name='div', attrs={'class':'rMobi'}) remove_tags_after=dict(name='div', attrs={'class':'rMobi'})
remove_tags=[dict(name='div', attrs={'class':['nnav', 'rMobi']}), dict(name='table', attrs={'class':'ramka_slx'})] remove_tags=[dict(name='div', attrs={'class':['nnav', 'rMobi']}), dict(name='table', attrs={'class':'ramka_slx'})]
feeds = [(u'Wiadomo\u015bci', u'http://rssout.idg.pl/cw/news_iso.xml')] feeds = [(u'Wiadomo\u015bci', u'http://rssout.idg.pl/cw/news_iso.xml')]

View File

@ -7,6 +7,7 @@ class Dobreprogramy_pl(BasicNewsRecipe):
__licence__ ='GPL v3' __licence__ ='GPL v3'
category = 'IT' category = 'IT'
language = 'pl' language = 'pl'
masthead_url='http://static.dpcdn.pl/css/Black/Images/header_logo_napis_fullVersion.png'
cover_url = 'http://userlogos.org/files/logos/Karmody/dobreprogramy_01.png' cover_url = 'http://userlogos.org/files/logos/Karmody/dobreprogramy_01.png'
description = u'Aktualności i blogi z dobreprogramy.pl' description = u'Aktualności i blogi z dobreprogramy.pl'
encoding = 'utf-8' encoding = 'utf-8'
@ -16,7 +17,8 @@ class Dobreprogramy_pl(BasicNewsRecipe):
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
preprocess_regexps = [(re.compile(ur'<div id="\S+360pmp4">Twoja przeglądarka nie obsługuje Flasha i HTML5 lub wyłączono obsługę JavaScript...</div>'), lambda match: '') ] preprocess_regexps = [(re.compile(ur'<div id="\S+360pmp4">Twoja przeglądarka nie obsługuje Flasha i HTML5 lub wyłączono obsługę JavaScript...</div>'), lambda match: '') ]
remove_tags = [dict(name='div', attrs={'class':['komentarze', 'block', 'portalInfo', 'menuBar', 'topBar']})] keep_only_tags=[dict(attrs={'class':['news', 'entry single']})]
keep_only_tags = [dict(name='div', attrs={'class':['mainBar', 'newsContent', 'postTitle title', 'postInfo', 'contentText', 'content']})] remove_tags = [dict(name='div', attrs={'class':['newsOptions', 'noPrint', 'komentarze', 'tags font-heading-master']})]
#remove_tags = [dict(name='div', attrs={'class':['komentarze', 'block', 'portalInfo', 'menuBar', 'topBar']})]
feeds = [(u'Aktualności', 'http://feeds.feedburner.com/dobreprogramy/Aktualnosci'), feeds = [(u'Aktualności', 'http://feeds.feedburner.com/dobreprogramy/Aktualnosci'),
('Blogi', 'http://feeds.feedburner.com/dobreprogramy/BlogCzytelnikow')] ('Blogi', 'http://feeds.feedburner.com/dobreprogramy/BlogCzytelnikow')]

View File

@ -8,15 +8,17 @@ class Dziennik_pl(BasicNewsRecipe):
description = u'Wiadomości z kraju i ze świata. Wiadomości gospodarcze. Znajdziesz u nas informacje, wydarzenia, komentarze, opinie.' description = u'Wiadomości z kraju i ze świata. Wiadomości gospodarcze. Znajdziesz u nas informacje, wydarzenia, komentarze, opinie.'
category = 'newspaper' category = 'newspaper'
language = 'pl' language = 'pl'
cover_url='http://6.s.dziennik.pl/images/og_dziennik.jpg' masthead_url= 'http://5.s.dziennik.pl/images/logos.png'
cover_url= 'http://5.s.dziennik.pl/images/logos.png'
no_stylesheets = True no_stylesheets = True
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 100
remove_javascript=True remove_javascript=True
remove_empty_feeds=True remove_empty_feeds=True
preprocess_regexps = [(re.compile("Komentarze:"), lambda m: '')] extra_css= 'ul {list-style: none; padding: 0; margin: 0;} li {float: left;margin: 0 0.15em;}'
preprocess_regexps = [(re.compile("Komentarze:"), lambda m: ''), (re.compile('<p><strong><a href=".*?">&gt;&gt;&gt; CZYTAJ TAKŻE: ".*?"</a></strong></p>'), lambda m: '')]
keep_only_tags=[dict(id='article')] keep_only_tags=[dict(id='article')]
remove_tags=[dict(name='div', attrs={'class':['art_box_dodatki', 'new_facebook_icons2', 'leftArt', 'article_print', 'quiz-widget']}), dict(name='a', attrs={'class':'komentarz'})] remove_tags=[dict(name='div', attrs={'class':['art_box_dodatki', 'new_facebook_icons2', 'leftArt', 'article_print', 'quiz-widget', 'belka-spol', 'belka-spol belka-spol-bottom', 'art_data_tags', 'cl_right', 'boxRounded gal_inside']}), dict(name='a', attrs={'class':['komentarz', 'article_icon_addcommnent']})]
feeds = [(u'Wszystko', u'http://rss.dziennik.pl/Dziennik-PL/'), feeds = [(u'Wszystko', u'http://rss.dziennik.pl/Dziennik-PL/'),
(u'Wiadomości', u'http://rss.dziennik.pl/Dziennik-Wiadomosci'), (u'Wiadomości', u'http://rss.dziennik.pl/Dziennik-Wiadomosci'),
(u'Gospodarka', u'http://rss.dziennik.pl/Dziennik-Gospodarka'), (u'Gospodarka', u'http://rss.dziennik.pl/Dziennik-Gospodarka'),
@ -30,6 +32,12 @@ class Dziennik_pl(BasicNewsRecipe):
(u'Podróże', u'http://rss.dziennik.pl/Dziennik-Podroze/'), (u'Podróże', u'http://rss.dziennik.pl/Dziennik-Podroze/'),
(u'Nieruchomości', u'http://rss.dziennik.pl/Dziennik-Nieruchomosci')] (u'Nieruchomości', u'http://rss.dziennik.pl/Dziennik-Nieruchomosci')]
def skip_ad_pages(self, soup):
tag=soup.find(name='a', attrs={'title':'CZYTAJ DALEJ'})
if tag:
new_soup=self.index_to_soup(tag['href'], raw=True)
return new_soup
def append_page(self, soup, appendtag): def append_page(self, soup, appendtag):
tag=soup.find('a', attrs={'class':'page_next'}) tag=soup.find('a', attrs={'class':'page_next'})
if tag: if tag:
@ -56,3 +64,4 @@ class Dziennik_pl(BasicNewsRecipe):
def preprocess_html(self, soup): def preprocess_html(self, soup):
self.append_page(soup, soup.body) self.append_page(soup, soup.body)
return soup return soup

View File

@ -10,7 +10,8 @@ class Filmweb_pl(BasicNewsRecipe):
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets= True no_stylesheets= True
extra_css = '.hdrBig {font-size:22px;}' remove_empty_feeds=True
extra_css = '.hdrBig {font-size:22px;} ul {list-style-type:none; padding: 0; margin: 0;}'
remove_tags= [dict(name='div', attrs={'class':['recommendOthers']}), dict(name='ul', attrs={'class':'fontSizeSet'})] remove_tags= [dict(name='div', attrs={'class':['recommendOthers']}), dict(name='ul', attrs={'class':'fontSizeSet'})]
keep_only_tags= [dict(name='h1', attrs={'class':'hdrBig'}), dict(name='div', attrs={'class':['newsInfo', 'reviewContent fontSizeCont description']})] keep_only_tags= [dict(name='h1', attrs={'class':'hdrBig'}), dict(name='div', attrs={'class':['newsInfo', 'reviewContent fontSizeCont description']})]
feeds = [(u'Wszystkie newsy', u'http://www.filmweb.pl/feed/news/latest'), feeds = [(u'Wszystkie newsy', u'http://www.filmweb.pl/feed/news/latest'),

View File

@ -0,0 +1,21 @@
from calibre.web.feeds.news import BasicNewsRecipe
class Gameplay_pl(BasicNewsRecipe):
    """Recipe for Gameplay.pl, a Polish site covering games, movies, books and music."""
    title = u'Gameplay.pl'
    __author__ = 'fenuks'
    description = u'gameplay.pl - serwis o naszych zainteresowaniach, grach, filmach, książkach, muzyce, fotografii i konsolach.'
    category = 'games, movies, books, music'
    language = 'pl'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    masthead_url = 'http://gameplay.pl/img/gpy_top_logo.png'
    cover_url = 'http://gameplay.pl/img/gpy_top_logo.png'
    keep_only_tags = [dict(name='div', attrs={'class': ['news_endpage_tit', 'news']})]
    remove_tags = [dict(name='div', attrs={'class': ['galeria', 'noedit center im']})]
    feeds = [(u'Wiadomo\u015bci', u'http://gameplay.pl/rss/')]

    def image_url_processor(self, baseurl, url):
        # Links already containing 'http' are returned untouched; anything
        # else is treated as a protocol-relative '//...' path and rewritten
        # onto the site host (the leading two characters are dropped).
        if 'http' in url:
            return url
        return 'http://gameplay.pl' + url[2:]

View File

@ -4,10 +4,11 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Gazeta_Wyborcza(BasicNewsRecipe): class Gazeta_Wyborcza(BasicNewsRecipe):
title = u'Gazeta Wyborcza' title = u'Gazeta Wyborcza'
__author__ = 'fenuks' __author__ = 'fenuks'
cover_url = 'http://bi.gazeta.pl/im/5/10285/z10285445AA.jpg'
language = 'pl' language = 'pl'
description ='news from gazeta.pl' description ='news from gazeta.pl'
category='newspaper' category='newspaper'
publication_type = 'newspaper'
masthead_url='http://bi.gazeta.pl/im/5/10285/z10285445AA.jpg'
INDEX='http://wyborcza.pl' INDEX='http://wyborcza.pl'
remove_empty_feeds= True remove_empty_feeds= True
oldest_article = 3 oldest_article = 3
@ -81,3 +82,10 @@ class Gazeta_Wyborcza(BasicNewsRecipe):
return url return url
else: else:
return url.replace('http://wyborcza.biz/biznes/1', 'http://wyborcza.biz/biznes/2029020') return url.replace('http://wyborcza.biz/biznes/1', 'http://wyborcza.biz/biznes/2029020')
def get_cover_url(self):
soup = self.index_to_soup('http://wyborcza.pl/0,76762,3751429.html')
cover=soup.find(id='GWmini2')
soup = self.index_to_soup('http://wyborcza.pl/'+ cover.contents[3].a['href'])
self.cover_url='http://wyborcza.pl' + soup.img['src']
return getattr(self, 'cover_url', self.cover_url)

View File

@ -8,29 +8,31 @@ class Gry_online_pl(BasicNewsRecipe):
language = 'pl' language = 'pl'
oldest_article = 13 oldest_article = 13
INDEX= 'http://www.gry-online.pl/' INDEX= 'http://www.gry-online.pl/'
cover_url='http://www.gry-online.pl/img/1st_10/1st-gol-logo.png' masthead_url='http://www.gry-online.pl/im/gry-online-logo.png'
cover_url='http://www.gry-online.pl/im/gry-online-logo.png'
max_articles_per_feed = 100 max_articles_per_feed = 100
no_stylesheets= True no_stylesheets= True
extra_css = 'p.wn1{font-size:22px;}' keep_only_tags=[dict(name='div', attrs={'class':'gc660'})]
remove_tags_after= [dict(name='div', attrs={'class':['tresc-newsa']})] remove_tags=[dict({'class':['nav-social', 'add-info', 'smlb', 'lista lista3 lista-gry', 'S013po', 'zm_gfx_cnt_bottom', 'ocen-txt', 'wiecej-txt', 'wiecej-txt2']})]
keep_only_tags = [dict(name='div', attrs={'class':['txthead']}), dict(name='p', attrs={'class':['wtx1', 'wn1', 'wob']}), dict(name='a', attrs={'class':['num_str_nex']})]
#remove_tags= [dict(name='div', attrs={'class':['news_plat']})]
feeds = [(u'Newsy', 'http://www.gry-online.pl/rss/news.xml'), ('Teksty', u'http://www.gry-online.pl/rss/teksty.xml')] feeds = [(u'Newsy', 'http://www.gry-online.pl/rss/news.xml'), ('Teksty', u'http://www.gry-online.pl/rss/teksty.xml')]
def append_page(self, soup, appendtag): def append_page(self, soup, appendtag):
nexturl = soup.find('a', attrs={'class':'num_str_nex'}) tag = appendtag.find('div', attrs={'class':'n5p'})
if appendtag.find('a', attrs={'class':'num_str_nex'}) is not None: if tag:
appendtag.find('a', attrs={'class':'num_str_nex'}).replaceWith('\n') nexturls=tag.findAll('a')
if nexturl is not None: for nexturl in nexturls[1:]:
if 'strona' in nexturl.div.string: try:
nexturl= self.INDEX + nexturl['href'] soup2 = self.index_to_soup('http://www.gry-online.pl/S020.asp'+ nexturl['href'])
soup2 = self.index_to_soup(nexturl) except:
pagetext = soup2.findAll(name='p', attrs={'class':['wtx1', 'wn1', 'wob']}) soup2 = self.index_to_soup('http://www.gry-online.pl/S022.asp'+ nexturl['href'])
for tag in pagetext: pagetext = soup2.find(attrs={'class':'gc660'})
pos = len(appendtag.contents) for r in pagetext.findAll(name='header'):
appendtag.insert(pos, tag) r.extract()
self.append_page(soup2, appendtag) pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
for r in appendtag.findAll(attrs={'class':['n5p', 'add-info', 'twitter-share-button']}):
r.extract()
def preprocess_html(self, soup): def preprocess_html(self, soup):

Binary file not shown.

After

Width:  |  Height:  |  Size: 994 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 991 B

BIN
recipes/icons/in4_pl.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 357 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 808 B

BIN
recipes/icons/kresy_pl.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.0 KiB

BIN
recipes/icons/mediapart.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 382 B

BIN
recipes/icons/oclab_pl.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 881 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 817 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 366 B

BIN
recipes/icons/pc_arena.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

BIN
recipes/icons/pc_foster.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 694 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 322 B

BIN
recipes/icons/pure_pc.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 386 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 850 B

BIN
recipes/icons/rue89.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 KiB

BIN
recipes/icons/tanuki.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1017 B

BIN
recipes/icons/tvn24.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 KiB

View File

@ -4,7 +4,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class IDGse(BasicNewsRecipe): class IDGse(BasicNewsRecipe):
title = 'IDG' title = 'IDG'
__author__ = 'zapt0' __author__ = 'Stanislav Khromov'
language = 'sv' language = 'sv'
description = 'IDG.se' description = 'IDG.se'
oldest_article = 1 oldest_article = 1
@ -15,6 +15,9 @@ class IDGse(BasicNewsRecipe):
feeds = [(u'Dagens IDG-nyheter',u'http://feeds.idg.se/idg/ETkj?format=xml')] feeds = [(u'Dagens IDG-nyheter',u'http://feeds.idg.se/idg/ETkj?format=xml')]
def get_article_url(self, article):
return article.get('guid', None)
def print_version(self,url): def print_version(self,url):
return url + '?articleRenderMode=print&m=print' return url + '?articleRenderMode=print&m=print'

44
recipes/in4_pl.recipe Normal file
View File

@ -0,0 +1,44 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class in4(BasicNewsRecipe):
    """News recipe for IN4.pl, a Polish IT news and review site."""
    title = u'IN4.pl'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Serwis Informacyjny - Aktualnosci, recenzje'
    category = 'IT'
    language = 'pl'
    #cover_url= 'http://www.in4.pl/recenzje/337/in4pl.jpg'
    no_stylesheets = True
    remove_empty_feeds = True
    # Strip the inline "translate into ..." helper links from article bodies.
    preprocess_regexps = [(re.compile(ur'<a title="translate into.*?</a>', re.DOTALL), lambda match: '') ]
    keep_only_tags=[dict(name='div', attrs={'class':'left_alone'})]
    remove_tags_after=dict(name='img', attrs={'title':'komentarze'})
    remove_tags=[dict(name='img', attrs={'title':'komentarze'})]
    feeds = [(u'Wiadomo\u015bci', u'http://www.in4.pl/rss.php'), (u'Recenzje', u'http://www.in4.pl/rss_recenzje.php'), (u'Mini recenzje', u'http://www.in4.pl/rss_mini.php')]
    def append_page(self, soup, appendtag):
        # Inline all follow-up pages of a multi-page article into appendtag.
        # First locate (and drop) the 'następna str' (next page) link on the
        # current page, then walk the chain page by page.
        a=soup.findAll('a')
        nexturl=None
        for i in a:
            if i.string and 'następna str' in i.string:
                nexturl='http://www.in4.pl/' + i['href']
                i.extract()  # remove the pager link from the assembled article
        while nexturl:
            soup2 = self.index_to_soup(nexturl)
            pagetext = soup2.find(id='news')
            pos = len(appendtag.contents)
            appendtag.insert(pos, pagetext)
            nexturl=None  # stays None when the fetched page has no further link
            tag=soup2.findAll('a')
            for z in tag:
                if z.string and u'następna str' in z.string:
                    nexturl='http://www.in4.pl/' + z['href']
                    break  # only the first matching link is followed
    def preprocess_html(self, soup):
        # Merge paginated article bodies before the normal cleanup runs.
        self.append_page(soup, soup.body)
        return soup

View File

@ -0,0 +1,18 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class Informacje_USA(BasicNewsRecipe):
title = u'Informacje USA'
oldest_article = 7
max_articles_per_feed = 100
__author__ = 'fenuks'
description = u'portal wiadomości amerykańskich'
category = 'news'
language = 'pl'
masthead_url= 'http://www.informacjeusa.com/wp-content/add_images/top_logo_5_2010.jpg'
cover_url='http://www.informacjeusa.com/wp-content/add_images/top_logo_5_2010.jpg'
no_stylesheets = True
preprocess_regexps = [(re.compile(ur'<p>Zobacz:.*?</p>', re.DOTALL), lambda match: ''), (re.compile(ur'<p><a href=".*?Zobacz także:.*?</a></p>', re.DOTALL), lambda match: ''), (re.compile(ur'<p><p>Zobacz też:.*?</a></p>', re.DOTALL), lambda match: '')]
keep_only_tags=[dict(name='div', attrs={'class':'box box-single'})]
remove_tags_after= dict(attrs={'class':'tags'})
remove_tags= [dict(attrs={'class':['postmetadata', 'tags', 'banner']}), dict(name='a', attrs={'title':['Drukuj', u'Wyślij']})]
feeds = [(u'Informacje', u'http://www.informacjeusa.com/feed/')]

View File

@ -1,8 +1,9 @@
#v2 2011-07-25
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1299694372(BasicNewsRecipe): class AdvancedUserRecipe1299694372(BasicNewsRecipe):
title = u'Instapaper' title = u'Instapaper'
__author__ = 'Darko Miletic' __author__ = 'Darko Miletic, Stanislav Khromov'
publisher = 'Instapaper.com' publisher = 'Instapaper.com'
category = 'info, custom, Instapaper' category = 'info, custom, Instapaper'
oldest_article = 365 oldest_article = 365
@ -15,6 +16,8 @@ class AdvancedUserRecipe1299694372(BasicNewsRecipe):
,dict(name='div', attrs={'id':'text_controls'}) ,dict(name='div', attrs={'id':'text_controls'})
,dict(name='div', attrs={'id':'editing_controls'}) ,dict(name='div', attrs={'id':'editing_controls'})
,dict(name='div', attrs={'class':'bar bottom'}) ,dict(name='div', attrs={'class':'bar bottom'})
,dict(name='div', attrs={'id':'controlbar_container'})
,dict(name='div', attrs={'id':'footer'})
] ]
use_embedded_content = False use_embedded_content = False
needs_subscription = True needs_subscription = True

99
recipes/japaa.recipe Normal file
View File

@ -0,0 +1,99 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1330393641(BasicNewsRecipe):
    """Recipe for JAAPA (Journal of the American Academy of Physician Assistants)."""
    title = u'JAAPA'
    __author__ = 'adoucette'
    language = 'en'
    oldest_article = 30
    max_articles_per_feed = 100
    auto_cleanup = True

    feeds = [
        (u'CME Articles', u'http://feeds.feedburner.com/jaapacmearticles'),
        (u'A Day in the Life', u'http://www.jaapa.com/pages/rss.aspx?sectionid=490'),
        (u'Ask A Librarian', u'http://www.jaapa.com/pages/rss.aspx?sectionid=847'),
        (u'Case of the Month', u'http://feeds.feedburner.com/jaapacaseofthemonth'),
        (u'Clinical Watch', u'http://feeds.feedburner.com/jaapaclinicalwatch'),
        (u'Commentary', u'http://feeds.feedburner.com/jaapacommentary'),
        (u'Critically Appraised Topic', u'http://www.jaapa.com/pages/rss.aspx?sectionid=699'),
        (u'Dermatology Digest', u'http://feeds.feedburner.com/jaapadermatologydigest'),
        (u'Diagnostic Imaging Review', u'http://feeds.feedburner.com/jaapadiagnosticimagingreview'),
        (u'Editorial', u'http://www.jaapa.com/pages/rss.aspx?sectionid=759'),
        (u'From the Academy', u'http://feeds.feedburner.com/jaapafromtheacademy'),
        (u'Genomics in PA Practice', u'http://www.jaapa.com/pages/rss.aspx?sectionid=760'),
        (u'Humane Medicine', u'http://www.jaapa.com/pages/rss.aspx?sectionid=758'),
        (u'Inside the AAPA Policy Manual', u'http://www.jaapa.com/pages/rss.aspx?sectionid=1546'),
        (u'Interpreting ECGs', u'http://www.jaapa.com/pages/rss.aspx?sectionid=1624'),
        (u'Letters', u'http://www.jaapa.com/pages/rss.aspx?sectionid=808'),
        (u'PA Quandaries', u'http://www.jaapa.com/pages/rss.aspx?sectionid=496'),
        (u'Pharmacology Consult', u'http://www.jaapa.com/pages/rss.aspx?sectionid=1614'),
        (u'POEMs', u'http://feeds.feedburner.com/jaapapoems'),
        (u'Quick Recertification', u'http://feeds.feedburner.com/jaapaquickrecertificationseries'),
        (u'Sounding Board', u'http://www.jaapa.com/pages/rss.aspx?sectionid=698'),
        (u'The Surgical Patient', u'http://www.jaapa.com/pages/rss.aspx?sectionid=499'),
        (u'Topics in Infectious Diseases', u'http://www.jaapa.com/pages/rss.aspx?sectionid=2495'),
        (u"What's New", u'http://feeds.feedburner.com/jaapawhatsnew'),
        (u'When the Patient Asks', u'http://www.jaapa.com/pages/rss.aspx?sectionid=501'),
        (u"Women's Health", u'http://www.jaapa.com/pages/rss.aspx?sectionid=2176'),
        (u'AAPA Special Article', u'http://www.jaapa.com/pages/rss.aspx?sectionid=1453'),
        (u'Case Reports', u'http://feeds.feedburner.com/jaapacasereports'),
        (u'Review Articles', u'http://feeds.feedburner.com/jaapareviewarticles'),
        (u'Surgical Reviews', u'http://www.jaapa.com/pages/rss.aspx?sectionid=505'),
        (u'Brief Report', u'http://www.jaapa.com/pages/rss.aspx?sectionid=2353'),
        (u'Research Corner', u'http://www.jaapa.com/pages/rss.aspx?sectionid=498'),
        (u'Research Reports', u'http://www.jaapa.com/pages/rss.aspx?sectionid=1024'),
        (u'The Art of Medicine', u'http://www.jaapa.com/pages/rss.aspx?sectionid=1289'),
        (u'Clinical Practice Guidelines', u'http://www.jaapa.com/pages/rss.aspx?sectionid=2102'),
        (u'Complementary and Alternative Medicine', u'http://www.jaapa.com/pages/rss.aspx?sectionid=2123'),
        (u'Drug Information', u'http://www.jaapa.com/pages/rss.aspx?sectionid=2089'),
        (u'Evidence-Based Medicine', u'http://www.jaapa.com/pages/rss.aspx?sectionid=1288'),
        (u'Patient Information', u'http://www.jaapa.com/pages/rss.aspx?sectionid=2122'),
    ]

    def get_cover_url(self):
        """Find the current issue's cover image on the site front page, or None."""
        front = self.index_to_soup('http://www.jaapa.com')
        img = front.find('img', src=re.compile(r'\w*?cover\w{1,22}\.jpg'))
        if not img:
            return None
        return img['src']

    def print_version(self, url):
        # Printer-friendly pages live at the same path with /printarticle/.
        return url.replace('/article/', '/printarticle/')

14
recipes/kresy_pl.recipe Normal file
View File

@ -0,0 +1,14 @@
from calibre.web.feeds.news import BasicNewsRecipe
class Kresy(BasicNewsRecipe):
    """Recipe for Kresy.pl, a portal of the Polish Eastern-Borderlands community."""
    title = u'Kresy'
    __author__ = 'fenuks'
    description = u'portal społeczności kresowej'
    language = 'pl'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    masthead_url = 'http://www.kresy.pl/public/img/logo.png'
    cover_url = 'http://www.kresy.pl/public/img/logo.png'
    # Article body sits in the #artykul container; share buttons and tag
    # clouds inside it are stripped.
    keep_only_tags = [dict(id='artykul')]
    remove_tags = [dict(attrs={'class': ['twitter-share-button', 'likefbborder', 'tagi']})]
    feeds = [(u'Wszystkie', u'http://www.kresy.pl/rss')]

View File

@ -1,5 +1,5 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>, Rogelio Domínguez <rogelio.dominguez@gmail.com>' __copyright__ = '2010-2012, Darko Miletic <darko.miletic at gmail.com>, Rogelio Domínguez <rogelio.dominguez@gmail.com>'
''' '''
www.jornada.unam.mx www.jornada.unam.mx
''' '''
@ -86,6 +86,6 @@ class LaJornada_mx(BasicNewsRecipe):
return soup return soup
def get_article_url(self, article): def get_article_url(self, article):
rurl = article.get('link', None) rurl = article.get('guid', None)
return rurl.rpartition('&partner=')[0] return rurl.rpartition('&partner=')[0]

View File

@ -0,0 +1,17 @@
__version__ = 'v1.0'
__date__ = '13, February 2011'
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1329125921(BasicNewsRecipe):
    """Recipe for 'La pausa caffè', an Italian satirical blog."""
    title = u'La pausa caff\xe8'
    __author__ = 'faber1971'
    description = 'An Italian satirical blog'
    language = 'it'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    # Rely on calibre's heuristic cleanup instead of hand-written tag filters.
    auto_cleanup = True
    feeds = [(u'La pausa caff\xe8', u'http://feeds.feedburner.com/LapausaCaffe')]

View File

@ -1,4 +1,5 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1327062445(BasicNewsRecipe): class AdvancedUserRecipe1327062445(BasicNewsRecipe):
@ -7,10 +8,13 @@ class AdvancedUserRecipe1327062445(BasicNewsRecipe):
max_articles_per_feed = 100 max_articles_per_feed = 100
auto_cleanup = True auto_cleanup = True
remove_javascript = True remove_javascript = True
no_stylesheets = True
remove_tags = [
dict(name='ul', attrs={'id':'ads0'})
]
masthead_url = 'http://www.simrendeogun.com/wp-content/uploads/2011/06/New-Marketing-Magazine-Logo.jpg' masthead_url = 'http://www.simrendeogun.com/wp-content/uploads/2011/06/New-Marketing-Magazine-Logo.jpg'
feeds = [(u'My Marketing', u'http://feed43.com/0537744466058428.xml'), (u'My Marketing_', u'http://feed43.com/8126723074604845.xml'), (u'Venturini', u'http://robertoventurini.blogspot.com/feeds/posts/default?alt=rss'), (u'Ninja Marketing', u'http://feeds.feedburner.com/NinjaMarketing'), (u'Comunitàzione', u'http://www.comunitazione.it/feed/novita.asp'), (u'Brandforum news', u'http://www.brandforum.it/rss/news'), (u'Brandforum papers', u'http://www.brandforum.it/rss/papers'), (u'Disambiguando', u'http://giovannacosenza.wordpress.com/feed/')]
__author__ = 'faber1971' __author__ = 'faber1971'
description = 'Collection of Italian marketing websites - v1.00 (28, January 2012)' description = 'Collection of Italian marketing websites - v1.03 (20, February 2012)'
language = 'it' language = 'it'
feeds = [(u'My Marketing', u'http://feed43.com/0537744466058428.xml'), (u'My Marketing_', u'http://feed43.com/8126723074604845.xml'), (u'Venturini', u'http://robertoventurini.blogspot.com/feeds/posts/default?alt=rss'), (u'Ninja Marketing', u'http://feeds.feedburner.com/NinjaMarketing'), (u'Comunitàzione', u'http://www.comunitazione.it/feed/novita.asp'), (u'Brandforum news', u'http://www.brandforum.it/rss/news'), (u'Brandforum papers', u'http://www.brandforum.it/rss/papers'), (u'MarketingArena', u'http://feeds.feedburner.com/marketingarena'), (u'minimarketing', u'http://feeds.feedburner.com/minimarketingit'), (u'Disambiguando', u'http://giovannacosenza.wordpress.com/feed/')]

View File

@ -1,69 +1,45 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>; 2010, Louis Gesbert <meta at antislash dot info>' __copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>; 2010-2012, Louis Gesbert <meta at antislash dot info>'
''' '''
Mediapart Mediapart
''' '''
from calibre.ebooks.BeautifulSoup import Tag __author__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>; 2010-2012, Louis Gesbert <meta at antislash dot info>'
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Mediapart(BasicNewsRecipe): class Mediapart(BasicNewsRecipe):
title = 'Mediapart' title = 'Mediapart'
__author__ = 'Mathieu Godlewski' __author__ = 'Mathieu Godlewski, Louis Gesbert'
description = 'Global news in french from online newspapers' description = 'Global news in french from news site Mediapart'
oldest_article = 7 oldest_article = 7
language = 'fr' language = 'fr'
needs_subscription = True needs_subscription = True
max_articles_per_feed = 50 max_articles_per_feed = 50
use_embedded_content = False
no_stylesheets = True no_stylesheets = True
cover_url = 'http://www.mediapart.fr/sites/all/themes/mediapart/mediapart/images/annonce.jpg' cover_url = 'http://static.mediapart.fr/files/pave_mediapart.jpg'
feeds = [ feeds = [
('Les articles', 'http://www.mediapart.fr/articles/feed'), ('Les articles', 'http://www.mediapart.fr/articles/feed'),
] ]
# -- print-version has poor quality on this website, better do the conversion ourselves # -- print-version
#
# preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in
# [
# (r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'),
# (r'<span class=\'auteur_staff\'>[^>]+<a title=\'[^\']*\'[^>]*>([^<]*)</a>[^<]*</span>',
# lambda match : '<i>'+match.group(1)+'</i>'),
# (r'\'', lambda match: '&rsquo;'),
# ]
# ]
#
# remove_tags = [ dict(name='div', attrs={'class':'print-source_url'}),
# dict(name='div', attrs={'class':'print-links'}),
# dict(name='img', attrs={'src':'entete_article.png'}),
# dict(name='br') ]
#
# def print_version(self, url):
# raw = self.browser.open(url).read()
# soup = BeautifulSoup(raw.decode('utf8', 'replace'))
# div = soup.find('div', {'id':re.compile('node-\d+')})
# if div is None:
# return None
# article_id = string.replace(div['id'], 'node-', '')
# if article_id is None:
# return None
# return 'http://www.mediapart.fr/print/'+article_id
# -- Non-print version [dict(name='div', attrs={'class':'advert'})] conversion_options = { 'smarten_punctuation' : True }
keep_only_tags = [ remove_tags = [ dict(name='div', attrs={'class':'print-source_url'}) ]
dict(name='h1', attrs={'class':'title'}),
dict(name='div', attrs={'class':'page_papier_detail'}),
]
def preprocess_html(self,soup): def print_version(self, url):
for title in soup.findAll('div', {'class':'titre'}): raw = self.browser.open(url).read()
tag = Tag(soup, 'h3') soup = BeautifulSoup(raw.decode('utf8', 'replace'))
title.replaceWith(tag) link = soup.find('a', {'title':'Imprimer'})
tag.insert(0,title) if link is None:
return soup return None
return link['href']
# -- Handle login # -- Handle login
@ -77,3 +53,10 @@ class Mediapart(BasicNewsRecipe):
br.submit() br.submit()
return br return br
def preprocess_html(self, soup):
for title in soup.findAll('p', {'class':'titre_page'}):
title.name = 'h3'
for legend in soup.findAll('span', {'class':'legend'}):
legend.insert(0, Tag(soup, 'br', []))
legend.name = 'small'
return soup

View File

@ -15,7 +15,7 @@ import re
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
from calibre.utils.magick import Image from calibre.utils.magick import Image
class Microwave_and_RF(BasicNewsRecipe): class Microwaves_and_RF(BasicNewsRecipe):
Convert_Grayscale = False # Convert images to gray scale or not Convert_Grayscale = False # Convert images to gray scale or not
@ -25,9 +25,9 @@ class Microwave_and_RF(BasicNewsRecipe):
# Add sections that want to be included from the magazine # Add sections that want to be included from the magazine
include_sections = [] include_sections = []
title = u'Microwave and RF' title = u'Microwaves and RF'
__author__ = 'kiavash' __author__ = u'kiavash'
description = u'Microwave and RF Montly Magazine' description = u'Microwaves and RF Montly Magazine'
publisher = 'Penton Media, Inc.' publisher = 'Penton Media, Inc.'
publication_type = 'magazine' publication_type = 'magazine'
site = 'http://mwrf.com' site = 'http://mwrf.com'
@ -96,9 +96,16 @@ class Microwave_and_RF(BasicNewsRecipe):
def parse_index(self): def parse_index(self):
# Fetches the main page of Microwave and RF # Fetches the main page of Microwaves and RF
soup = self.index_to_soup(self.site) soup = self.index_to_soup(self.site)
# First page has the ad, Let's find the redirect address.
url = soup.find('span', attrs={'class':'commonCopy'}).find('a').get('href')
if url.startswith('/'):
url = self.site + url
soup = self.index_to_soup(url)
# Searches the site for Issue ID link then returns the href address # Searches the site for Issue ID link then returns the href address
# pointing to the latest issue # pointing to the latest issue
latest_issue = soup.find('a', attrs={'href':lambda x: x and 'IssueID' in x}).get('href') latest_issue = soup.find('a', attrs={'href':lambda x: x and 'IssueID' in x}).get('href')

View File

@ -1,8 +1,9 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class naczytniki(BasicNewsRecipe): class naczytniki(BasicNewsRecipe):
title = u'naczytniki.pl' title = u'naczytniki.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
masthead_url= 'http://naczytniki.pl/wp-content/uploads/2010/08/logo_nc28.png'
cover_url = 'http://naczytniki.pl/wp-content/uploads/2010/08/logo_nc28.png' cover_url = 'http://naczytniki.pl/wp-content/uploads/2010/08/logo_nc28.png'
language = 'pl' language = 'pl'
description ='everything about e-readers' description ='everything about e-readers'
@ -10,6 +11,7 @@ class naczytniki(BasicNewsRecipe):
no_stylesheets=True no_stylesheets=True
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 100 max_articles_per_feed = 100
preprocess_regexps = [(re.compile(ur'<p><br><b>Zobacz także:</b></p>.*?</body>', re.DOTALL), lambda match: '</body>') ]
remove_tags_after= dict(name='div', attrs={'class':'sociable'}) remove_tags_after= dict(name='div', attrs={'class':'sociable'})
keep_only_tags=[dict(name='div', attrs={'class':'post'})] keep_only_tags=[dict(name='div', attrs={'class':'post'})]
remove_tags=[dict(name='span', attrs={'class':'comments'}), dict(name='div', attrs={'class':'sociable'})] remove_tags=[dict(name='span', attrs={'class':'comments'}), dict(name='div', attrs={'class':'sociable'})]

View File

@ -1,21 +1,33 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class Nowa_Fantastyka(BasicNewsRecipe): class Nowa_Fantastyka(BasicNewsRecipe):
title = u'Nowa Fantastyka' title = u'Nowa Fantastyka'
oldest_article = 7 oldest_article = 7
__author__ = 'fenuks' __author__ = 'fenuks'
__modified_by__ = 'zaslav'
language = 'pl' language = 'pl'
encoding='latin2' encoding='latin2'
description ='site for fantasy readers' description ='site for fantasy readers'
category='fantasy' category='fantasy'
masthead_url='http://farm5.static.flickr.com/4133/4956658792_7ba7fbf562.jpg'
#extra_css='.tytul {font-size: 20px;}' #not working
max_articles_per_feed = 100 max_articles_per_feed = 100
INDEX='http://www.fantastyka.pl/' INDEX='http://www.fantastyka.pl/'
no_stylesheets=True no_stylesheets=True
needs_subscription = 'optional' needs_subscription = 'optional'
remove_tags_before=dict(attrs={'class':'belka1-tlo-md'}) remove_tags_before=dict(attrs={'class':'naglowek2'})
#remove_tags_after=dict(name='span', attrs={'class':'naglowek-oceny'}) #remove_tags_after=dict(name='span', attrs={'class':'naglowek-oceny'})
remove_tags_after=dict(name='td', attrs={'class':'belka1-bot'}) remove_tags_after=dict(name='form', attrs={'name':'form1'})
remove_tags=[dict(attrs={'class':'avatar2'}), dict(name='span', attrs={'class':'alert-oceny'}), dict(name='img', attrs={'src':['obrazki/sledz1.png', 'obrazki/print.gif', 'obrazki/mlnf.gif']}), dict(name='b', text='Dodaj komentarz'),dict(name='a', attrs={'href':'http://www.fantastyka.pl/10,1727.html'})] remove_tags=[dict(attrs={'class':['avatar2', 'belka-margin', 'naglowek2']}), dict(name='span', attrs={'class':'alert-oceny'}), dict(name='img', attrs={'src':['obrazki/sledz1.png', 'obrazki/print.gif', 'obrazki/mlnf.gif']}), dict(name='b', text='Dodaj komentarz'),dict(name='a', attrs={'href':'http://www.fantastyka.pl/10,1727.html'}), dict(name='form')]
preprocess_regexps = [
(re.compile(r'\<table .*?\>'), lambda match: ''),
(re.compile(r'\<td.*?\>'), lambda match: ''),
(re.compile(r'\<center\>'), lambda match: '')]
def find_articles(self, url): def find_articles(self, url):
articles = [] articles = []
@ -41,10 +53,10 @@ class Nowa_Fantastyka(BasicNewsRecipe):
return feeds return feeds
def get_cover_url(self): def get_cover_url(self):
soup = self.index_to_soup('http://www.fantastyka.pl/1.html') soup = self.index_to_soup('http://www.e-kiosk.pl/nowa_fantastyka')
cover=soup.find(name='img', attrs={'class':'okladka'}) self.cover_url='http://www.e-kiosk.pl' + soup.find(name='a', attrs={'class':'img'})['href']
self.cover_url=self.INDEX+ cover['src']
return getattr(self, 'cover_url', self.cover_url) return getattr(self, 'cover_url', self.cover_url)
def get_browser(self): def get_browser(self):
@ -56,3 +68,18 @@ class Nowa_Fantastyka(BasicNewsRecipe):
br['pass'] = self.password br['pass'] = self.password
br.submit() br.submit()
return br return br
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll(font=True):
del item['font']
for item in soup.findAll(align=True):
del item['align']
for item in soup.findAll(name='tr'):
item.name='div'
title=soup.find(attrs={'class':'tytul'})
if title:
title['style']='font-size: 20px; font-weight: bold;'
self.log.warn(soup)
return soup

31
recipes/oclab_pl.recipe Normal file
View File

@ -0,0 +1,31 @@
from calibre.web.feeds.news import BasicNewsRecipe
class OCLab(BasicNewsRecipe):
    """News recipe for OCLab.pl, a Polish overclocking / PC-hardware portal."""
    title = u'OCLab.pl'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Portal OCLab.pl jest miejscem przyjaznym pasjonatom sprzętu komputerowego, w szczególności overclockerom, które będzie służyć im za aktualną bazę wiedzy o podkręcaniu komputera, źródło aktualnych informacji z rynku oraz opinii na temat sprzętu komputerowego.'
    category = 'IT'
    language = 'pl'
    cover_url= 'http://www.idealforum.ru/attachment.php?attachmentid=7963&d=1316008118'
    no_stylesheets = True
    keep_only_tags=[dict(id='main')]
    remove_tags_after= dict(attrs={'class':'single-postmetadata'})
    remove_tags=[dict(attrs={'class':['single-postmetadata', 'pagebar']})]
    feeds = [(u'Wpisy', u'http://oclab.pl/feed/')]
    def append_page(self, soup, appendtag):
        # Multi-page posts expose a page-jump <select> ('contentjumpddl');
        # each <option> value is the URL of one page.
        tag=soup.find(attrs={'class':'contentjumpddl'})
        if tag:
            nexturl=tag.findAll('option')
            # Skip the first option (current page) and the last
            # (presumably a non-page entry — TODO confirm against the site).
            for nextpage in nexturl[1:-1]:
                soup2 = self.index_to_soup(nextpage['value'])
                pagetext = soup2.find(attrs={'class':'single-entry'})
                pos = len(appendtag.contents)
                appendtag.insert(pos, pagetext)
            # Remove the per-page navigation bars from the merged article.
            for r in appendtag.findAll(attrs={'class':'post-nav-bottom-list'}):
                r.extract()
    def preprocess_html(self, soup):
        # Merge paginated post bodies before the normal cleanup runs.
        self.append_page(soup, soup.body)
        return soup

View File

@ -0,0 +1,37 @@
import re
from calibre.web.feeds.news import BasicNewsRecipe


class Overclock_pl(BasicNewsRecipe):
    # Recipe for Overclock.pl, a Polish overclocking/cooling vortal.
    title = u'Overclock.pl'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Vortal poświęcony tematyce hardware, kładący największy nacisk na podkręcanie / overclocking (włącznie z extreme) i chłodzenie / cooling (air cooling, water cooling, freon cooling, dry ice, liquid nitrogen).'
    category = 'IT'
    language = 'pl'
    masthead_url='http://www.overclock.pl/gfx/logo_m.png'
    cover_url='http://www.overclock.pl/gfx/logo_m.png'
    no_stylesheets = True
    remove_empty_feeds = True
    # Strip the comment-link blurb and the "Nawigacja" heading from articles.
    preprocess_regexps = [(re.compile(ur'<b>Komentarze do aktualności:.*?</a>', re.DOTALL), lambda match: ''), (re.compile(ur'<h3>Nawigacja</h3>', re.DOTALL), lambda match: '') ]
    keep_only_tags=[dict(name='div', attrs={'class':'news'}), dict(id='articleContent')]
    remove_tags=[dict(name='span', attrs={'class':'info'}), dict(attrs={'class':'shareit'})]
    feeds = [(u'Aktualno\u015bci', u'http://www.overclock.pl/rss.news.xml'), (u'Testy i recenzje', u'http://www.overclock.pl/rss.articles.xml')]

    def append_page(self, soup, appendtag):
        """Merge all sub-pages of a paginated article into *appendtag*."""
        # The page list lives in a <select id="navigation"> drop-down.
        tag=soup.find(id='navigation')
        if tag:
            nexturl=tag.findAll('option')
            # Remove the selector itself from the merged document.
            tag.extract()
            # NOTE(review): options are sliced with [2:]; presumably the
            # first two entries are a placeholder and the current page --
            # confirm against the live markup.
            for nextpage in nexturl[2:]:
                soup2 = self.index_to_soup(nextpage['value'])
                pagetext = soup2.find(id='content')
                pos = len(appendtag.contents)
                appendtag.insert(pos, pagetext)
            # Drop the 'first page' arrow together with its parent container.
            rem=appendtag.find(attrs={'alt':'Pierwsza'})
            if rem:
                rem.parent.extract()

    def preprocess_html(self, soup):
        # Stitch multi-page articles into one document before conversion.
        self.append_page(soup, soup.body)
        return soup

14
recipes/palmtop_pl.recipe Normal file
View File

@ -0,0 +1,14 @@
from calibre.web.feeds.news import BasicNewsRecipe


class palmtop_pl(BasicNewsRecipe):
    # Minimal feed-only recipe for Palmtop.pl, a Polish mobile-technology
    # site; relies entirely on the BasicNewsRecipe defaults for scraping.
    title = u'Palmtop.pl'
    __author__ = 'fenuks'
    description = 'wortal technologii mobilnych'
    category = 'mobile'
    language = 'pl'
    cover_url='http://cdn.g-point.biz/wp-content/themes/palmtop-new/images/header_palmtop_logo.png'
    masthead_url='http://cdn.g-point.biz/wp-content/themes/palmtop-new/images/header_palmtop_logo.png'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    feeds = [(u'Newsy', u'http://palmtop.pl/feed/atom/')]

31
recipes/pc_arena.recipe Normal file
View File

@ -0,0 +1,31 @@
from calibre.web.feeds.news import BasicNewsRecipe


class PC_Arena(BasicNewsRecipe):
    """Recipe for PCArena, a Polish IT news and review site.  Paginated
    articles are merged into a single document."""
    title = u'PCArena'
    oldest_article = 18300
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Najnowsze informacje z branży IT - testy, recenzje, aktualności, rankingi, wywiady. Twoje źródło informacji o sprzęcie komputerowym.'
    category = 'IT'
    language = 'pl'
    masthead_url = 'http://pcarena.pl/public/design/frontend/images/logo.gif'
    cover_url = 'http://pcarena.pl/public/design/frontend/images/logo.gif'
    no_stylesheets = True
    keep_only_tags = [dict(attrs={'class': ['artHeader', 'art']})]
    remove_tags = [dict(attrs={'class': 'pages'})]
    feeds = [(u'Newsy', u'http://pcarena.pl/misc/rss/news'), (u'Artyku\u0142y', u'http://pcarena.pl/misc/rss/articles')]

    def append_page(self, soup, appendtag):
        """Fetch the remaining pages of the article and append their body
        text to *appendtag*."""
        pagination = soup.find(name='div', attrs={'class': 'pagNum'})
        if pagination:
            links = pagination.findAll('a')
            # The pager itself must not appear in the output.
            pagination.extract()
            # The first link points at the page we already have.
            for link in links[1:]:
                url = 'http://pcarena.pl' + link['href']
                body = self.index_to_soup(url).find(attrs={'class': 'artBody'})
                appendtag.insert(len(appendtag.contents), body)

    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        return soup

View File

@ -0,0 +1,41 @@
from calibre.web.feeds.news import BasicNewsRecipe


class PC_Centre(BasicNewsRecipe):
    """Recipe for PC Centre, a Polish computing portal with hardware and
    software reviews; merges multi-page articles into one document."""
    title = u'PC Centre'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Portal komputerowy, a w nim: testy sprzętu komputerowego, recenzje gier i oprogramowania. a także opisy produktów związanych z komputerami.'
    category = 'IT'
    language = 'pl'
    masthead_url = 'http://pccentre.pl/views/images/logo.gif'
    cover_url = 'http://pccentre.pl/views/images/logo.gif'
    no_stylesheets = True
    keep_only_tags = [dict(id='content')]
    remove_tags = [dict(attrs={'class': ['ikony r', 'list_of_content', 'dot accordion']}), dict(id='comments')]
    feeds = [(u'Publikacje', u'http://pccentre.pl/backend.php?mode=a'), (u'Aktualno\u015bci', u'http://pccentre.pl/backend.php'), (u'Sprz\u0119t komputerowy', u'http://pccentre.pl/backend.php?mode=n&section=2'), (u'Oprogramowanie', u'http://pccentre.pl/backend.php?mode=n&section=3'), (u'Gry komputerowe i konsole', u'http://pccentre.pl/backend.php?mode=n&section=4'), (u'Internet', u'http://pccentre.pl/backend.php?mode=n&section=7'), (u'Bezpiecze\u0144stwo', u'http://pccentre.pl/backend.php?mode=n&section=5'), (u'Multimedia', u'http://pccentre.pl/backend.php?mode=n&section=6'), (u'Biznes', u'http://pccentre.pl/backend.php?mode=n&section=9')]

    def append_page(self, soup, appendtag):
        """Fetch every remaining article page, strip its boilerplate and
        append the cleaned content to *appendtag*."""
        pager = soup.find(name='div', attrs={'class': 'pages'})
        if pager:
            page_links = pager.findAll('a')
            # Remove the pager widget from the merged document.
            pager.extract()
            # The last link is the 'next' arrow, not a page number.
            for link in page_links[:-1]:
                subsoup = self.index_to_soup('http://pccentre.pl' + link['href'])
                pagetext = subsoup.find(id='content')
                # Collect boilerplate (widgets, comments and the repeated
                # page title) and drop it before appending.
                junk = pagetext.findAll(attrs={'class': ['subtitle', 'content_info', 'list_of_content', 'pages', 'social2', 'pcc_acc', 'pcc_acc_na']})
                junk += pagetext.findAll(id='comments')
                junk += pagetext.findAll('h1')
                for item in junk:
                    item.extract()
                appendtag.insert(len(appendtag.contents), pagetext)

    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        return soup

35
recipes/pc_foster.recipe Normal file
View File

@ -0,0 +1,35 @@
from calibre.web.feeds.news import BasicNewsRecipe


class PC_Foster(BasicNewsRecipe):
    # Recipe for PC Foster, a Polish hardware/overclocking vortal.
    title = u'PC Foster'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Vortal technologiczny: testy, recenzje sprzętu komputerowego i telefonów, nowinki hardware, programy i gry dla Windows. Podkręcanie, modding i Overclocking.'
    category = 'IT'
    language = 'pl'
    masthead_url='http://pcfoster.pl/public/images/logo.png'
    cover_url= 'http://pcfoster.pl/public/images/logo.png'
    no_stylesheets= True
    remove_empty_feeds= True
    keep_only_tags= [dict(id=['news_details', 'review_details']), dict(attrs={'class':'pager more_top'})]
    remove_tags=[dict(name='p', attrs={'class':'right'})]
    feeds = [(u'G\u0142\xf3wny', u'http://pcfoster.pl/public/rss/main.xml')]

    def append_page(self, soup, appendtag):
        """Follow the 'next page' arrow and merge every sub-page of the
        article into *appendtag*."""
        # The arrow is an <img alt="Następna strona"> inside the link that
        # must be followed.
        nexturl= appendtag.find(attrs={'alt':u'Następna strona'})
        if nexturl:
            # Drop the pager widget from the first page before merging.
            appendtag.find(attrs={'class':'pager more_top'}).extract()
            while nexturl:
                # The parent <a> of the arrow image carries the relative URL.
                nexturl='http://pcfoster.pl' + nexturl.parent['href']
                soup2 = self.index_to_soup(nexturl)
                # Re-probe for a further page before extracting the content.
                nexturl=soup2.find(attrs={'alt':u'Następna strona'})
                pagetext = soup2.find(attrs={'class':'content'})
                pos = len(appendtag.contents)
                appendtag.insert(pos, pagetext)
            # Remove duplicated review blocks left over after merging.
            for r in appendtag.findAll(attrs={'class':'review_content double'}):
                r.extract()

    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        return soup

View File

@ -0,0 +1,81 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class Polska_times(BasicNewsRecipe):
title = u'Polska Times'
__author__ = 'fenuks'
description = u'Internetowe wydanie dziennika ogólnopolskiego Polska The Times. Najświeższe informacje: wydarzenia w kraju i na świecie, reportaże, poradniki, opinie.'
category = 'newspaper'
language = 'pl'
masthead_url = 'http://s.polskatimes.pl/g/logo_naglowek/polska.gif?17'
oldest_article = 7
max_articles_per_feed = 100
remove_emty_feeds= True
no_stylesheets = True
preprocess_regexps = [(re.compile(ur'<b>Czytaj także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur',<b>Czytaj też:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>Zobacz także:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<center><h4><a.*?</a></h4></center>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TEŻ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ WIĘCEJ:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>CZYTAJ TAKŻE:.*?</b>', re.DOTALL), lambda match: ''), (re.compile(ur'<b>\* CZYTAJ KONIECZNIE:.*', re.DOTALL), lambda match: '</body>'), (re.compile(ur'<b>Nasze serwisy:</b>.*', re.DOTALL), lambda match: '</body>') ]
keep_only_tags= [dict(id=['tytul-artykulu', 'kontent'])]
remove_tags_after= dict(id='material-tagi')
remove_tags=[dict(attrs={'id':'reklama_srodtekst_0'}), dict(attrs={'id':'material-tagi'}), dict(name='div', attrs={'class':'zakladki'}), dict(attrs={'title':u'CZYTAJ TAKŻE'}), dict(attrs={'id':'podobne'}), dict(name='a', attrs={'href':'http://www.dzienniklodzki.pl/newsletter'})]
feeds = [(u'Fakty', u'http://polskatimes.feedsportal.com/c/32980/f/533648/index.rss'), (u'Opinie', u'http://www.polskatimes.pl/rss/opinie.xml'), (u'Sport', u'http://polskatimes.feedsportal.com/c/32980/f/533649/index.rss'), (u'Pieni\u0105dze', u'http://polskatimes.feedsportal.com/c/32980/f/533657/index.rss'), (u'Twoje finanse', u'http://www.polskatimes.pl/rss/twojefinanse.xml'), (u'Kultura', u'http://polskatimes.feedsportal.com/c/32980/f/533650/index.rss'), (u'Dodatki', u'http://www.polskatimes.pl/rss/dodatki.xml')]
def skip_ad_pages(self, soup):
if 'Advertisement' in soup.title:
nexturl=soup.find('a')['href']
return self.index_to_soup(nexturl, raw=True)
def append_page(self, soup, appendtag):
nexturl=soup.find(id='nastepna_strona')
while nexturl:
soup2= self.index_to_soup(nexturl['href'])
nexturl=soup2.find(id='nastepna_strona')
pagetext = soup2.find(id='tresc')
for dictionary in self.remove_tags:
v=pagetext.findAll(attrs=dictionary['attrs'])
for delete in v:
delete.extract()
for b in pagetext.findAll(name='b'):
if b.string:
if u'CZYTAJ TEŻ' in b.string or u'Czytaj także' in b.string or u'Czytaj też' in b.string or u'Zobacz także' in b.string:
b.extract()
for center in pagetext.findAll(name='center'):
if center.h4:
if center.h4.a:
center.extract()
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
for paginator in appendtag.findAll(attrs={'class':'stronicowanie'}):
paginator.extract()
def image_article(self, soup, appendtag):
nexturl=soup.find('a', attrs={'class':'nastepna'})
urls=[]
while nexturl:
if nexturl not in urls:
urls.append(nexturl)
else:
break
soup2= self.index_to_soup('http://www.polskatimes.pl/artykul/' + nexturl['href'])
nexturl=soup2.find('a', attrs={'class':'nastepna'})
if nexturl in urls:
break;
pagetext = soup2.find(id='galeria-material')
pos = len(appendtag.contents)
appendtag.insert(pos, '<br />')
pos = len(appendtag.contents)
appendtag.insert(pos, pagetext)
for rem in appendtag.findAll(attrs={'class':['galeriaNawigator', 'miniaturyPojemnik']}):
rem.extract()
for paginator in appendtag.findAll(attrs={'class':'stronicowanie'}):
paginator.extract()
def preprocess_html(self, soup):
if soup.find('a', attrs={'class':'nastepna'}):
self.image_article(soup, soup.body)
elif soup.find(id='nastepna_strona'):
self.append_page(soup, soup.body)
return soup
def get_cover_url(self):
soup = self.index_to_soup('http://www.prasa24.pl/gazeta/metropolia-warszawska/')
self.cover_url=soup.find(id='pojemnik').img['src']
return getattr(self, 'cover_url', self.cover_url)

33
recipes/pure_pc.recipe Normal file
View File

@ -0,0 +1,33 @@
from calibre.web.feeds.news import BasicNewsRecipe


class PurePC(BasicNewsRecipe):
    """Recipe for PurePC, a Polish hardware site; stitches multi-page
    articles into a single document."""
    title = u'PurePC'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Artykuły, aktualności, sprzęt, forum, chłodzenie, modding, urządzenia mobilne - wszystko w jednym miejscu.'
    category = 'IT'
    language = 'pl'
    masthead_url = 'http://www.purepc.pl/themes/new/images/purepc.jpg'
    cover_url = 'http://www.purepc.pl/themes/new/images/purepc.jpg'
    no_stylesheets = True
    keep_only_tags = [dict(id='content')]
    remove_tags_after = dict(attrs={'class': 'fivestar-widget'})
    remove_tags = [dict(id='navigator'), dict(attrs={'class': ['box-tools', 'fivestar-widget', 'PageMenuList']})]
    feeds = [(u'Wiadomo\u015bci', u'http://www.purepc.pl/node/feed')]

    def append_page(self, soup, appendtag):
        """Follow the 'pager-next' link chain and merge every article page
        into *appendtag*."""
        next_link = appendtag.find(attrs={'class': 'pager-next'})
        if next_link:
            while next_link:
                page = self.index_to_soup('http://www.purepc.pl' + next_link.a['href'])
                # Probe for a further page *before* the body is moved.
                next_link = page.find(attrs={'class': 'pager-next'})
                appendtag.insert(len(appendtag.contents),
                                 page.find(attrs={'class': 'article'}))
            # After merging, remove the now-redundant navigation widgets.
            for widget in appendtag.findAll(attrs={'class': ['PageMenuList', 'pager', 'fivestar-widget']}):
                widget.extract()

    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        return soup

View File

@ -0,0 +1,54 @@
__copyright__ = '2012, Micha\u0142 <webmaster@racjonalista.pl>'

'''
Racjonalista.pl
'''

from calibre.web.feeds.news import BasicNewsRecipe
import re


class Racjonalista(BasicNewsRecipe):
    # Recipe for Racjonalista.pl, published by the Polish Freethought
    # Foundation (Fundacja Wolnej Myśli).
    __author__ = u'Micha\u0142 <webmaster@racjonalista.pl>'
    publisher = u'Fundacja Wolnej My\u015bli'
    title = u'Racjonalista.pl'
    description = u'Racjonalista.pl'
    category = 'newspaper'
    language = 'pl'
    encoding = 'iso-8859-2'
    oldest_article = 7
    max_articles_per_feed = 20
    remove_javascript = True
    no_stylesheets = True
    use_embedded_content = False
    simultaneous_downloads = 2
    timeout = 30
    cover_url = 'http://www.racjonalista.pl/img/uimg/rac.gif'
    feeds = [(u'Racjonalista.pl', u'http://www.racjonalista.pl/rss.php')]
    # Only article pages (kk.php) are accepted from the feed.
    match_regexps = [r'kk\.php']

    def print_version(self, url):
        # '/s,' pages are the on-screen variant; '/t,' is the print layout.
        return url.replace('/s,', '/t,')

    extra_css = 'h2 {font: serif large} .cytat {text-align: right}'
    remove_attributes = ['target', 'width', 'height']
    # Hand-rolled cleanup of the site's table-based layout: collapse empty
    # paragraphs, unwrap author/title markup, drop the enclosing layout
    # table and strip footnote / cross-reference links.
    preprocess_regexps = [
        (re.compile(i[0], re.DOTALL), i[1]) for i in
        [ (r'<p[^>]*>&nbsp;</p>', lambda match: ''),
        (r'&nbsp;', lambda match: ' '),
        (r'<meta[^>]+>', lambda match: ''),
        (r'<link[^>]+>', lambda match: ''),
        (r'</?center>', lambda match: ''),
        (r'<a href="[^"]+" rel=author><b>(?P<a>[^<]+)</b></a>', lambda match: '<b>' + match.group('a') + '</b>'),
        (r'<div align=center style="font-size:18px">(?P<t>[^<]+)</div>', lambda match: '<h2>' + match.group('t') + '</h2>'),
        (r'<table align=center width=700 border=0 cellpadding=0 cellspacing=0><tr><td width="100%" bgcolor="#edeceb" height="100%" style="font-size:12px">', lambda match: ''),
        (r'</td></tr><tr><td>', lambda match: ''),
        (r'</td></tr></table></body>', lambda match: '</body>'),
        (r'<a[^>]+><sup>(?P<p>[^<]+)</sup></a>', lambda match: '<sup>' + match.group('p') + '</sup>'),
        (r'<a name=p[^>]+>(?P<a>[^<]+)</a>', lambda match: match.group('a')),
        (r'<a href="[^"]+" target=_blank class=linkext>Orygin[^<]+</a>', lambda match: ''),
        (r'<a href="[^"]+" class=powiazanie>Poka[^<]+</a>', lambda match: '')]
    ]

View File

@ -1,13 +1,11 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2010, Louis Gesbert <meta at antislash dot info>' __copyright__ = '2010-2012, Louis Gesbert <meta at antislash dot info>'
''' '''
Rue89 Rue89
''' '''
__author__ = '2010, Louis Gesbert <meta at antislash dot info>' __author__ = '2010-2012, Louis Gesbert <meta at antislash dot info>'
import re
from calibre.ebooks.BeautifulSoup import Tag
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
class Rue89(BasicNewsRecipe): class Rue89(BasicNewsRecipe):
@ -17,37 +15,45 @@ class Rue89(BasicNewsRecipe):
title = u'Rue89' title = u'Rue89'
language = 'fr' language = 'fr'
oldest_article = 7 oldest_article = 7
max_articles_per_feed = 50 max_articles_per_feed = 12
feeds = [(u'La Une', u'http://www.rue89.com/homepage/feed')] use_embedded_content = False
# From http://www.rue89.com/les-flux-rss-de-rue89
feeds = [
(u'La Une', u'http://www.rue89.com/feed'),
(u'Rue69', u'http://www.rue89.com/rue69/feed'),
(u'Eco', u'http://www.rue89.com/rue89-eco/feed'),
(u'Planète', u'http://www.rue89.com/rue89-planete/feed'),
(u'Sport', u'http://www.rue89.com/rue89-sport/feed'),
(u'Culture', u'http://www.rue89.com/culture/feed'),
(u'Hi-tech', u'http://www.rue89.com/hi-tech/feed'),
(u'Media', u'http://www.rue89.com/medias/feed'),
(u'Monde', u'http://www.rue89.com/monde/feed'),
(u'Politique', u'http://www.rue89.com/politique/feed'),
(u'Societe', u'http://www.rue89.com/societe/feed'),
]
# Follow redirection from feedsportal.com
def get_article_url(self,article):
return self.browser.open_novisit(article.link).geturl()
def print_version(self, url):
return url + '?imprimer=1'
no_stylesheets = True no_stylesheets = True
preprocess_regexps = [ conversion_options = { 'smarten_punctuation' : True }
(re.compile(r'<(/?)h2>', re.IGNORECASE|re.DOTALL),
lambda match : '<'+match.group(1)+'h3>'),
(re.compile(r'<div class="print-title">([^>]+)</div>', re.IGNORECASE|re.DOTALL),
lambda match : '<h2>'+match.group(1)+'</h2>'),
(re.compile(r'<img[^>]+src="[^"]*/numeros/(\d+)[^0-9.">]*.gif"[^>]*/>', re.IGNORECASE|re.DOTALL),
lambda match : '<span style="font-family: Sans-serif; color: red; font-size:24pt; padding=2pt;">'+match.group(1)+'</span>'),
(re.compile(r'\''), lambda match: '&rsquo;'),
]
def preprocess_html(self,soup): keep_only_tags = [
body = Tag(soup, 'body') dict(name='div', attrs={'id':'article'}),
title = soup.find('h1', {'class':'title'}) ]
content = soup.find('div', {'class':'content'})
soup.body.replaceWith(body)
body.insert(0, title)
body.insert(1, content)
return soup
remove_tags = [ #dict(name='div', attrs={'class':'print-source_url'}), remove_tags_after = [
#dict(name='div', attrs={'class':'print-links'}), dict(name='div', attrs={'id':'plus_loin'}),
#dict(name='img', attrs={'class':'print-logo'}), ]
dict(name='div', attrs={'class':'content_top'}),
dict(name='div', attrs={'id':'sidebar-left'}), ]
# -- print-version has poor quality on this website, better do the conversion ourselves remove_tags = [
# def print_version(self, url): dict(name='div', attrs={'id':'article_tools'}),
# return re.sub('^.*-([0-9]+)$', 'http://www.rue89.com/print/\\1',url) dict(name='div', attrs={'id':'plus_loin'}),
]

155
recipes/satmagazine.recipe Normal file
View File

@ -0,0 +1,155 @@
#!/usr/bin/env python
##
## Title:        SatMagazine
##
## License:      GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html
##
## Written:      Feb 2012
## Last Edited:  Mar 2012
##

# Feb 2012: Initial release

__license__ = 'GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html'
'''
satmagazine.com
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe


class SatMagazine(BasicNewsRecipe):
    """Recipe for SatMagazine, Satnews Publishers' monthly covering the
    North American satellite market.  The whole current edition is scraped
    from a single index page (there is no RSS feed)."""

    title = u'SatMagazine'
    description = u'North American Satellite Markets...'
    publisher = 'Satnews Publishers'
    publication_type = 'magazine'
    INDEX = 'http://www.satmagazine.com/cgi-bin/display_edition.cgi'

    __author__ = 'kiavash'
    language = 'en'
    asciiize = True
    timeout = 120
    simultaneous_downloads = 2

    # Flattens all the tables to make it compatible with Nook
    conversion_options = {'linearize_tables': True}

    keep_only_tags = [dict(name='span', attrs={'class': 'story'})]

    no_stylesheets = True
    remove_javascript = True

    remove_attributes = ['border', 'cellspacing', 'align', 'cellpadding', 'colspan',
                         'valign', 'vspace', 'hspace', 'alt', 'width', 'height']

    # Specify extra CSS - overrides ALL other CSS (IE. Added last).
    extra_css = 'body { font-family: verdana, helvetica, sans-serif; } \
                 .introduction, .first { font-weight: bold; } \
                 .cross-head { font-weight: bold; font-size: 125%; } \
                 .cap, .caption { display: block; font-size: 80%; font-style: italic; } \
                 .cap, .caption, .caption img, .caption span { display: block; margin: 5px auto; } \
                 .byl, .byd, .byline img, .byline-name, .byline-title, .author-name, .author-position, \
                 .correspondent-portrait img, .byline-lead-in, .name, .bbc-role { display: block; \
                 font-size: 80%; font-style: italic; margin: 1px auto; } \
                 .story-date, .published { font-size: 80%; } \
                 table { width: 100%; } \
                 td img { display: block; margin: 5px auto; } \
                 ul { padding-top: 10px; } \
                 ol { padding-top: 10px; } \
                 li { padding-top: 5px; padding-bottom: 5px; } \
                 h1 { font-size: 175%; font-weight: bold; } \
                 h2 { font-size: 150%; font-weight: bold; } \
                 h3 { font-size: 125%; font-weight: bold; } \
                 h4, h5, h6 { font-size: 100%; font-weight: bold; }'

    # Remove the line breaks, href links and float left/right and picture width/height.
    preprocess_regexps = [(re.compile(r'<br[ ]*/>', re.IGNORECASE), lambda m: ''),
                          (re.compile(r'<br[ ]*clear.*/>', re.IGNORECASE), lambda m: ''),
                          (re.compile(r'<a.*?>'), lambda h1: ''),
                          (re.compile(r'</a>'), lambda h2: ''),
                          (re.compile(r'float:.*?'), lambda h3: ''),
                          (re.compile(r'width:.*?px'), lambda h4: ''),
                          (re.compile(r'height:.*?px'), lambda h5: '')
                          ]

    def parse_index(self):
        """Build the single-section feed list from the edition index page."""
        article_info = []
        feeds = []

        soup = self.index_to_soup(self.INDEX)

        # Find Cover image
        cover = soup.find('img', src=True, alt='Cover Image')
        if cover is not None:
            self.cover_url = cover['src']
            self.log('Found Cover image:', self.cover_url)

        # Main part of the site that has the articles.
        soup = soup.find('div', attrs={'id': 'middlecontent'})

        # Find the magazine date from the masthead heading.
        ts = soup.find('span', attrs={'class': 'master_heading'})
        ds = ' '.join(self.tag_to_string(ts).strip().split()[:2])
        self.log('Found Current Issue:', ds)
        self.timefmt = ' [%s]' % ds

        articles = soup.findAll('span', attrs={'class': 'heading'})
        descriptions = soup.findAll('span', attrs={'class': 'story'})

        # Pair each headline with its blurb.  zip() replaces the old manual
        # index counter and also guards against the IndexError the counter
        # hit whenever the two lists differed in length.
        for article, desc_tag in zip(articles, descriptions):
            title = self.tag_to_string(article)
            url = article.find('a').get('href')
            self.log('\tFound article:', title, 'at', url)
            desc = self.tag_to_string(desc_tag)
            article_info.append({'title': title, 'url': url, 'description': desc,
                                 'date': self.timefmt})

        if article_info:
            feeds.append((self.title, article_info))
        return feeds

    def preprocess_html(self, soup):
        """Neutralise ad images, promote bold runs to headings and unwrap
        all remaining links."""
        # Finds all the images
        for figure in soup.findAll('img', attrs={'src': True}):
            # Ads are recognisable from their alt text.  .get() avoids the
            # KeyError the old figure['alt'] lookup raised on images that
            # carry no alt attribute at all.
            alt = figure.get('alt', '')
            if ('_ad_' in alt) or ('_snipe_' in alt):
                # Strip everything that makes it render, then demote the tag.
                for attr in ('src', 'alt', 'border', 'hspace', 'vspace', 'align', 'size'):
                    del figure[attr]
                figure.name = 'font'
                continue

            figure['style'] = 'display:block'   # adds \n before and after the image

        # Makes the title standing out
        for title in soup.findAll('b'):
            title.name = 'h3'

        # Removes all unrelated links
        for link in soup.findAll('a', attrs={'href': True}):
            link.name = 'font'
            del link['href']
            del link['target']

        return soup

View File

@ -11,7 +11,7 @@ class Sueddeutsche(BasicNewsRecipe):
title = u'Süddeutsche.de' # 2012-01-26 AGe Correct Title title = u'Süddeutsche.de' # 2012-01-26 AGe Correct Title
description = 'News from Germany, Access to online content' # 2012-01-26 AGe description = 'News from Germany, Access to online content' # 2012-01-26 AGe
__author__ = 'Oliver Niesner and Armin Geller' #Update AGe 2012-01-26 __author__ = 'Oliver Niesner and Armin Geller' #Update AGe 2012-01-26
publisher = 'Süddeutsche Zeitung' # 2012-01-26 AGe add publisher = u'Süddeutsche Zeitung' # 2012-01-26 AGe add
category = 'news, politics, Germany' # 2012-01-26 AGe add category = 'news, politics, Germany' # 2012-01-26 AGe add
timefmt = ' [%a, %d %b %Y]' # 2012-01-26 AGe add %a timefmt = ' [%a, %d %b %Y]' # 2012-01-26 AGe add %a
oldest_article = 7 oldest_article = 7

View File

@ -9,10 +9,10 @@ from calibre.web.feeds.news import BasicNewsRecipe
from calibre import strftime from calibre import strftime
class SueddeutcheZeitung(BasicNewsRecipe): class SueddeutcheZeitung(BasicNewsRecipe):
title = 'Süddeutsche Zeitung' title = u'Süddeutsche Zeitung'
__author__ = 'Darko Miletic' __author__ = 'Darko Miletic'
description = 'News from Germany. Access to paid content.' description = 'News from Germany. Access to paid content.'
publisher = 'Süddeutsche Zeitung' publisher = u'Süddeutsche Zeitung'
category = 'news, politics, Germany' category = 'news, politics, Germany'
no_stylesheets = True no_stylesheets = True
oldest_article = 2 oldest_article = 2

View File

@ -1,14 +1,16 @@
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.news import BasicNewsRecipe
import re
class Tablety_pl(BasicNewsRecipe): class Tablety_pl(BasicNewsRecipe):
title = u'Tablety.pl' title = u'Tablety.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = u'tablety.pl - latest tablet news' description = u'tablety.pl - latest tablet news'
masthead_url= 'http://www.tablety.pl/wp-content/themes/kolektyw/img/logo.png'
cover_url = 'http://www.tablety.pl/wp-content/themes/kolektyw/img/logo.png' cover_url = 'http://www.tablety.pl/wp-content/themes/kolektyw/img/logo.png'
category = 'IT' category = 'IT'
language = 'pl' language = 'pl'
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
preprocess_regexps = [(re.compile(ur'<p><strong>Przeczytaj także.*?</a></strong></p>', re.DOTALL), lambda match: ''), (re.compile(ur'<p><strong>Przeczytaj koniecznie.*?</a></strong></p>', re.DOTALL), lambda match: '')]
remove_tags_before=dict(name="h1", attrs={'class':'entry-title'}) remove_tags_before=dict(name="h1", attrs={'class':'entry-title'})
remove_tags_after=dict(name="div", attrs={'class':'snap_nopreview sharing robots-nocontent'}) remove_tags_after=dict(name="div", attrs={'class':'snap_nopreview sharing robots-nocontent'})
remove_tags=[dict(name='div', attrs={'class':'snap_nopreview sharing robots-nocontent'})] remove_tags=[dict(name='div', attrs={'class':'snap_nopreview sharing robots-nocontent'})]

37
recipes/tanuki.recipe Normal file
View File

@ -0,0 +1,37 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re


class tanuki(BasicNewsRecipe):
    # Recipe for Tanuki, a Polish anime/manga portal; one recipe covers the
    # anime, manga and reading-room (czytelnia) sub-sites.
    title = u'Tanuki'
    oldest_article = 7
    __author__ = 'fenuks'
    category = 'anime, manga'
    language = 'pl'
    max_articles_per_feed = 100
    encoding='utf-8'
    extra_css= 'ul {list-style: none; padding: 0; margin: 0;} .kadr{float: left;} .dwazdania {float: right;}'
    # Drop screenshot headers and the 'see how they rated it' links.
    preprocess_regexps = [(re.compile(ur'<h3><a class="screen".*?</h3>', re.DOTALL), lambda match: ''), (re.compile(ur'<div><a href="/strony/((manga)|(anime))/[0-9]+?/oceny(\-redakcji){0,1}">Zobacz jak ocenili</a></div>', re.DOTALL), lambda match: '')]
    remove_empty_feeds= True
    no_stylesheets = True
    keep_only_tags=[dict(attrs={'class':['animename', 'storyname', 'nextarrow','sideinfov', 'sidelinfov', 'sideinfo', 'sidelinfo']}), dict(name='table', attrs={'summary':'Technikalia'}), dict(attrs={'class':['chaptername','copycat']}), dict(id='rightcolumn'), dict(attrs={'class':['headn_tt', 'subtable']})]
    remove_tags=[dict(name='div', attrs={'class':'screen'}), dict(id='randomtoplist'), dict(attrs={'class':'note'})]
    feeds = [(u'Anime', u'http://anime.tanuki.pl/rss_anime.xml'), (u'Manga', u'http://manga.tanuki.pl/rss_manga.xml'), (u'Tomiki', u'http://manga.tanuki.pl/rss_mangabooks.xml'), (u'Artyku\u0142y', u'http://czytelnia.tanuki.pl/rss_czytelnia_artykuly.xml'), (u'Opowiadania', u'http://czytelnia.tanuki.pl/rss_czytelnia.xml')]

    def append_page(self, soup, appendtag):
        """Follow the 'nextarrow' pager and merge every chapter page of a
        reading-room story into *appendtag*."""
        nexturl= appendtag.find(attrs={'class':'nextarrow'})
        if nexturl:
            while nexturl:
                # Pager URLs are relative to the czytelnia sub-site.
                soup2 = self.index_to_soup('http://czytelnia.tanuki.pl'+ nexturl['href'])
                nexturl=soup2.find(attrs={'class':'nextarrow'})
                # First matching block: the chapter text (or copyright note).
                pagetext = soup2.find(attrs={'class':['chaptername', 'copycat']})
                pos = len(appendtag.contents)
                appendtag.insert(pos, pagetext)
                # NOTE(review): if the find() above already matched the
                # 'copycat' block, this second lookup re-inserts the same
                # tag -- confirm the intended page markup on the live site.
                pagetext = soup2.find(attrs={'class':'copycat'})
                pos = len(appendtag.contents)
                appendtag.insert(pos, pagetext)
            # Remove the navigation arrows from the merged document.
            for r in appendtag.findAll(attrs={'class':'nextarrow'}):
                r.extract()

    def preprocess_html(self, soup):
        self.append_page(soup, soup.body)
        return soup

View File

@ -1,49 +1,57 @@
import re import re
from calibre.web.feeds.news import BasicNewsRecipe from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag
class AdvancedUserRecipe1268409464(BasicNewsRecipe): class AdvancedUserRecipe1325006965(BasicNewsRecipe):
title = u'The Sun'
__author__ = 'Chaz Ralph' title = u'The Sun UK'
description = 'News from The Sun' cover_url = 'http://www.thesun.co.uk/img/global/new-masthead-logo.png'
description = 'A Recipe for The Sun tabloid UK - uses feed43'
__author__ = 'Dave Asbury'
# last updated 20/2/12
language = 'en_GB'
oldest_article = 1 oldest_article = 1
max_articles_per_feed = 100 max_articles_per_feed = 15
language = 'en' remove_empty_feeds = True
no_stylesheets = True no_stylesheets = True
extra_css = '.headline {font-size: x-large;} \n .fact { padding-top: 10pt }'
encoding= 'iso-8859-1' masthead_url = 'http://www.thesun.co.uk/sol/img/global/Sun-logo.gif'
remove_javascript = True encoding = 'cp1251'
encoding = 'cp1252'
remove_empty_feeds = True
remove_javascript = True
no_stylesheets = True
extra_css = '''
body{ text-align: justify; font-family:Arial,Helvetica,sans-serif; font-size:11px; font-size-adjust:none; font-stretch:normal; font-style:normal; font-variant:normal; font-weight:normal;}
'''
preprocess_regexps = [
(re.compile(r'<div class="foot-copyright".*?</div>', re.IGNORECASE | re.DOTALL), lambda match: '')]
keep_only_tags = [ keep_only_tags = [
dict(id='column-print') dict(name='h1'),dict(name='h2',attrs={'class' : 'medium centered'}),
dict(name='div',attrs={'class' : 'text-center'}),
dict(name='div',attrs={'id' : 'bodyText'})
# dict(name='p')
]
remove_tags=[
#dict(name='head'),
dict(attrs={'class' : ['mystery-meat-link','ltbx-container','ltbx-var ltbx-hbxpn','ltbx-var ltbx-nav-loop','ltbx-var ltbx-url']}),
dict(name='div',attrs={'class' : 'cf'}),
dict(attrs={'title' : 'download flash'}),
dict(attrs={'style' : 'padding: 5px'})
]
feeds = [
(u'News','http://feed43.com/2517447382644748.xml'),
(u'Sport', u'http://feed43.com/4283846255668687.xml'),
(u'Bizarre', u'http://feed43.com/0233840304242011.xml'),
(u'Film',u'http://feed43.com/1307545221226200.xml'),
(u'Music',u'http://feed43.com/1701513435064132.xml'),
(u'Sun Woman',u'http://feed43.com/0022626854226453.xml'),
] ]
remove_tags = [
dict(name='div', attrs={'class':[
'clear text-center small padding-left-right-5 text-999 padding-top-5 padding-bottom-10 grey-solid-line',
'clear width-625 bg-fff padding-top-10'
]}),
dict(name='video'),
]
def preprocess_html(self, soup):
h1 = soup.find('h1')
if h1 is not None:
text = self.tag_to_string(h1)
nh = Tag(soup, 'h1')
nh.insert(0, text)
h1.replaceWith(nh)
return soup
feeds = [(u'News', u'http://www.thesun.co.uk/sol/homepage/feeds/rss/article312900.ece')
,(u'Sport', u'http://www.thesun.co.uk/sol/homepage/feeds/rss/article247732.ece')
,(u'Football', u'http://www.thesun.co.uk/sol/homepage/feeds/rss/article247739.ece')
,(u'Gizmo', u'http://www.thesun.co.uk/sol/homepage/feeds/rss/article247829.ece')
,(u'Bizarre', u'http://www.thesun.co.uk/sol/homepage/feeds/rss/article247767.ece')]
def print_version(self, url):
return re.sub(r'\?OTC-RSS&ATTR=[-a-zA-Z]+', '?print=yes', url)

View File

@ -0,0 +1,11 @@
from calibre.web.feeds.news import BasicNewsRecipe


class BasicUserRecipe1317069944(BasicNewsRecipe):
    # Auto-generated basic recipe for the Times of Malta; relies on
    # calibre's auto_cleanup heuristics instead of hand-written selectors.
    title = u'Times of Malta'
    __author__ = 'To Do'
    language = 'en'
    oldest_article = 7
    max_articles_per_feed = 100
    auto_cleanup = True
    feeds = [(u'Times of Malta', u'http://www.timesofmalta.com/rss')]

24
recipes/tvn24.recipe Normal file
View File

@ -0,0 +1,24 @@
from calibre.web.feeds.news import BasicNewsRecipe


class tvn24(BasicNewsRecipe):
    """Recipe for TVN24, the news portal of the Polish 24-hour news
    channel."""
    title = u'TVN24'
    oldest_article = 7
    max_articles_per_feed = 100
    __author__ = 'fenuks'
    description = u'Sport, Biznes, Gospodarka, Informacje, Wiadomości Zawsze aktualne wiadomości z Polski i ze świata'
    category = 'news'
    language = 'pl'
    masthead_url = 'http://www.tvn24.pl/_d/topmenu/logo2.gif'
    cover_url = 'http://www.tvn24.pl/_d/topmenu/logo2.gif'
    extra_css = 'ul {list-style: none; padding: 0; margin: 0;} li {float: left;margin: 0 0.15em;}'
    remove_empty_feeds = True
    remove_javascript = True
    no_stylesheets = True
    keep_only_tags = [dict(id='tvn24_wiadomosci_detal'), dict(name='h1', attrs={'class': 'standardHeader1'}), dict(attrs={'class': ['date60m rd5', 'imageBackground fl rd7', 'contentFromCMS']})]
    remove_tags_after = dict(name='div', attrs={'class': 'socialBoxesBottom'})
    remove_tags = [dict(attrs={'class': ['tagi_detal', 'socialBoxesBottom', 'twitterBox', 'commentsInfo', 'textSize', 'obj_ukrytydruk obj_ramka1_r', 'related newsNews align-right', 'box', 'newsUserList', 'watchMaterial text']})]
    # BUGFIX: the 'Polska' feed URL was missing its http:// scheme, which
    # makes the feed fetch fail; all other feeds already carry it.
    feeds = [(u'Najnowsze', u'http://www.tvn24.pl/najnowsze.xml'), (u'Polska', u'http://www.tvn24.pl/polska.xml'), (u'\u015awiat', u'http://www.tvn24.pl/swiat.xml'), (u'Sport', u'http://www.tvn24.pl/sport.xml'), (u'Biznes', u'http://www.tvn24.pl/biznes.xml'), (u'Meteo', u'http://www.tvn24.pl/meteo.xml'), (u'Micha\u0142ki', u'http://www.tvn24.pl/michalki.xml'), (u'Kultura', u'http://www.tvn24.pl/kultura.xml')]

    def preprocess_html(self, soup):
        # Drop inline styles so the recipe's extra_css controls the layout.
        for item in soup.findAll(style=True):
            del item['style']
        return soup

View File

@ -4,10 +4,12 @@ class Ubuntu_pl(BasicNewsRecipe):
title = u'UBUNTU.pl' title = u'UBUNTU.pl'
__author__ = 'fenuks' __author__ = 'fenuks'
description = 'UBUNTU.pl - polish ubuntu community site' description = 'UBUNTU.pl - polish ubuntu community site'
masthead_url= 'http://ubuntu.pl/img/logo.jpg'
cover_url = 'http://ubuntu.pl/img/logo.jpg' cover_url = 'http://ubuntu.pl/img/logo.jpg'
category = 'linux, IT' category = 'linux, IT'
language = 'pl' language = 'pl'
no_stylesheets = True no_stylesheets = True
remove_empty_feeds = True
oldest_article = 8 oldest_article = 8
max_articles_per_feed = 100 max_articles_per_feed = 100
extra_css = '#main {text-align:left;}' extra_css = '#main {text-align:left;}'

View File

@ -0,0 +1,39 @@
from calibre.web.feeds.news import BasicNewsRecipe
class webhosting_pl(BasicNewsRecipe):
    """Recipe for Webhosting.pl, a Polish portal covering hosting,
    networking and internet services.

    Article bodies are taken from the site's print pages (see
    :meth:`print_version`), so no scraping tag filters are needed.
    """

    title = u'Webhosting.pl'
    __author__ = 'fenuks'
    description = 'Webhosting.pl to pierwszy na polskim rynku serwis poruszający w szerokim aspekcie tematy związane z hostingiem, globalną Siecią i usługami internetowymi. Głównym celem przedsięwzięcia jest dostarczanie przydatnej i bogatej merytorycznie wiedzy osobom, które chcą tworzyć i efektywnie wykorzystywać współczesny Internet.'
    category = 'web'
    language = 'pl'

    cover_url = 'http://webhosting.pl/images/logo.png'
    masthead_url = 'http://webhosting.pl/images/logo.png'

    # Fetch window and per-feed cap.
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    remove_empty_feeds = True

    # Tag filters kept for reference only -- the print pages used by
    # print_version() are already clean.
    #keep_only_tags= [dict(name='div', attrs={'class':'content_article'}), dict(attrs={'class':'paging'})]
    #remove_tags=[dict(attrs={'class':['tags', 'wykop', 'facebook_button_count', 'article_bottom']})]

    feeds = [
        (u'Newsy', u'http://webhosting.pl/feed/rss/an'),
        (u'Artyku\u0142y', u'http://webhosting.pl/feed/rss/aa'),
        (u'Software', u'http://webhosting.pl/feed/rss/n/12'),
        (u'Internet', u'http://webhosting.pl/feed/rss/n/9'),
        (u'Biznes', u'http://webhosting.pl/feed/rss/n/13'),
        (u'Bezpiecze\u0144stwo', u'http://webhosting.pl/feed/rss/n/10'),
        (u'Blogi', u'http://webhosting.pl/feed/rss/ab'),
        (u'Programowanie', u'http://webhosting.pl/feed/rss/n/8'),
        (u'Kursy', u'http://webhosting.pl/feed/rss/n/11'),
        (u'Tips&Tricks', u'http://webhosting.pl/feed/rss/n/15'),
        (u'Imprezy', u'http://webhosting.pl/feed/rss/n/22'),
        (u'Wywiady', u'http://webhosting.pl/feed/rss/n/24'),
        (u'Porady', u'http://webhosting.pl/feed/rss/n/3027'),
        (u'Znalezione w sieci', u'http://webhosting.pl/feed/rss/n/6804'),
        (u'Dev area', u'http://webhosting.pl/feed/rss/n/24504'),
        (u"Webmaster's blog", u'http://webhosting.pl/feed/rss/n/29195'),
        (u'Domeny', u'http://webhosting.pl/feed/rss/n/11513'),
        (u'Praktyka', u'http://webhosting.pl/feed/rss/n/2'),
        (u'Serwery', u'http://webhosting.pl/feed/rss/n/11514'),
        (u'Inne', u'http://webhosting.pl/feed/rss/n/24811'),
        (u'Marketing', u'http://webhosting.pl/feed/rss/n/11535'),
    ]

    def print_version(self, url):
        """Rewrite an article URL to its printer-friendly equivalent by
        inserting the ``/print`` path segment after the host."""
        printable = url.replace('webhosting.pl', 'webhosting.pl/print')
        return printable

View File

@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Worldcrunch(BasicNewsRecipe): class Worldcrunch(BasicNewsRecipe):
title = u'Worldcrunch' title = u'Worldcrunch'
__author__ = 'Krittika Goyal' __author__ = 'Krittika Goyal'
oldest_article = 1 #days oldest_article = 2 #days
max_articles_per_feed = 25 max_articles_per_feed = 25
use_embedded_content = False use_embedded_content = False

View File

@ -1,7 +1,10 @@
@echo OFF @echo OFF
REM Calibre-Portable.bat
REM ¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬
REM
REM Batch File to start a Calibre configuration on Windows REM Batch File to start a Calibre configuration on Windows
REM giving explicit control of the location of: REM giving explicit control of the location of:
REM - Calibe Program Files REM - Calibre Program Files
REM - Calibre Library Files REM - Calibre Library Files
REM - Calibre Config Files REM - Calibre Config Files
REM - Calibre Metadata database REM - Calibre Metadata database
@ -25,6 +28,19 @@ REM - CalibreSource Location of Calibre Source files (Optional)
REM REM
REM This batch file is designed so that if you create the recommended REM This batch file is designed so that if you create the recommended
REM folder structure then it can be used 'as is' without modification. REM folder structure then it can be used 'as is' without modification.
REM
REM More information on the Environment Variables used by Calibre can
REM be found at:
REM http://manual.calibre-ebook.com/customize.html#environment-variables
REM
REM The documentation for this file in the Calibre manual can be found at:
REM http://manual.calibre-ebook.com/portable.html
REM
REM CHANGE HISTORY
REM ¬¬¬¬¬¬¬¬¬¬¬¬¬¬
REM 22 Jan 2012 itimpi - Updated to keep it in line with the calibre-portable.sh
REM file for Linux systems
REM ------------------------------------- REM -------------------------------------
@ -36,7 +52,7 @@ REM -------------------------------------
IF EXIST CalibreConfig ( IF EXIST CalibreConfig (
SET CALIBRE_CONFIG_DIRECTORY=%cd%\CalibreConfig SET CALIBRE_CONFIG_DIRECTORY=%cd%\CalibreConfig
ECHO CONFIG FILES: %cd%\CalibreConfig ECHO CONFIG FILES: %cd%\CalibreConfig
) )
@ -54,11 +70,11 @@ REM --------------------------------------------------------------
IF EXIST U:\eBooks\CalibreLibrary ( IF EXIST U:\eBooks\CalibreLibrary (
SET CALIBRE_LIBRARY_DIRECTORY=U:\eBOOKS\CalibreLibrary SET CALIBRE_LIBRARY_DIRECTORY=U:\eBOOKS\CalibreLibrary
ECHO LIBRARY FILES: U:\eBOOKS\CalibreLibrary ECHO LIBRARY FILES: U:\eBOOKS\CalibreLibrary
) )
IF EXIST CalibreLibrary ( IF EXIST CalibreLibrary (
SET CALIBRE_LIBRARY_DIRECTORY=%cd%\CalibreLibrary SET CALIBRE_LIBRARY_DIRECTORY=%cd%\CalibreLibrary
ECHO LIBRARY FILES: %cd%\CalibreLibrary ECHO LIBRARY FILES: %cd%\CalibreLibrary
) )
@ -67,20 +83,23 @@ REM Specify Location of metadata database (optional)
REM REM
REM Location where the metadata.db file is located. If not set REM Location where the metadata.db file is located. If not set
REM the same location as Books files will be assumed. This. REM the same location as Books files will be assumed. This.
REM options is used to get better performance when the Library is REM option is typically set to get better performance when the
REM on a (slow) network drive. Putting the metadata.db file REM Library is on a (slow) network drive. Putting the metadata.db
REM locally then makes gives a big performance improvement. REM file locally then makes gives a big performance improvement.
REM REM
REM NOTE. If you use this option, then the ability to switch REM NOTE. If you use this option, then the ability to switch
REM libraries within Calibre will be disabled. Therefore REM libraries within Calibre will be disabled. Therefore
REM you do not want to set it if the metadata.db file REM you do not want to set it if the metadata.db file
REM is at the same location as the book files. REM is at the same location as the book files.
REM
REM Another point to watch is that plugins can cause problems
REM as they often store absolute path information
REM -------------------------------------------------------------- REM --------------------------------------------------------------
IF EXIST %cd%\CalibreMetadata\metadata.db ( IF EXIST %cd%\CalibreMetadata\metadata.db (
IF NOT "%CALIBRE_LIBRARY_DIRECTORY%" == "%cd%\CalibreMetadata" ( IF NOT "%CALIBRE_LIBRARY_DIRECTORY%" == "%cd%\CalibreMetadata" (
SET CALIBRE_OVERRIDE_DATABASE_PATH=%cd%\CalibreMetadata\metadata.db SET CALIBRE_OVERRIDE_DATABASE_PATH=%cd%\CalibreMetadata\metadata.db
ECHO DATABASE: %cd%\CalibreMetadata\metadata.db ECHO DATABASE: %cd%\CalibreMetadata\metadata.db
ECHO ' ECHO '
ECHO ***CAUTION*** Library Switching will be disabled ECHO ***CAUTION*** Library Switching will be disabled
ECHO ' ECHO '
@ -94,61 +113,79 @@ REM It is easy to run Calibre from source
REM Just set the environment variable to where the source is located REM Just set the environment variable to where the source is located
REM When running from source the GUI will have a '*' after the version. REM When running from source the GUI will have a '*' after the version.
REM number that is displayed at the bottom of the Calibre main screen. REM number that is displayed at the bottom of the Calibre main screen.
REM
REM More information on setting up a development environment can
REM be found at:
REM http://manual.calibre-ebook.com/develop.html#develop
REM -------------------------------------------------------------- REM --------------------------------------------------------------
IF EXIST CalibreSource\src ( IF EXIST CalibreSource\src (
SET CALIBRE_DEVELOP_FROM=%cd%\CalibreSource\src SET CALIBRE_DEVELOP_FROM=%cd%\CalibreSource\src
ECHO SOURCE FILES: %cd%\CalibreSource\src ECHO SOURCE FILES: %cd%\CalibreSource\src
) ELSE (
ECHO SOURCE FILES: *** Not being Used ***
) )
REM -------------------------------------------------------------- REM --------------------------------------------------------------
REM Specify Location of calibre binaries (optional) REM Specify Location of calibre Windows binaries (optional)
REM REM
REM To avoid needing Calibre to be set in the search path, ensure REM To avoid needing Calibre to be set in the search path, ensure
REM that Calibre Program Files is current directory when starting. REM that Calibre Program Files is current directory when starting.
REM The following test falls back to using search path . REM The following test falls back to using search path .
REM This folder can be populated by copying the Calibre2 folder from REM This folder can be populated by copying the Calibre2 folder from
REM an existing installation or by installing direct to here. REM an existing installation or by installing direct to here.
REM
REM NOTE. Do not try and put both Windows and Linux binaries into
REM same folder as this can cause problems.
REM -------------------------------------------------------------- REM --------------------------------------------------------------
IF EXIST %cd%\Calibre2 ( IF EXIST %cd%\Calibre2 (
CD %cd%\Calibre2 CD %cd%\Calibre2
ECHO PROGRAM FILES: %cd% ECHO PROGRAM FILES: %cd%
) ELSE (
ECHO PROGRAM FILES: *** Use System search PATH ***
) )
REM -------------------------------------------------------------- REM --------------------------------------------------------------
REM Location of Calibre Temporary files (optional) REM Location of Calibre Temporary files (optional)
REM REM
REM Calibre creates a lot of temproary files while running REM Calibre creates a lot of temporary files while running
REM In theory these are removed when Calibre finishes, but REM In theory these are removed when Calibre finishes, but
REM in practise files can be left behind (particularily if REM in practise files can be left behind (particularily if
REM any errors occur. Using this option allows some REM any errors occur). Using this option allows some
REM explicit clean-up of these files. REM explicit clean-up of these files.
REM If not set Calibre uses the normal system TEMP location REM If not set Calibre uses the normal system TEMP location
REM -------------------------------------------------------------- REM --------------------------------------------------------------
SET CALIBRE_TEMP_DIR=%TEMP%\CALIBRE_TEMP SET CALIBRE_TEMP_DIR=%TEMP%\CALIBRE_TEMP
ECHO TEMPORARY FILES: %CALIBRE_TEMP_DIR% ECHO TEMPORARY FILES: %CALIBRE_TEMP_DIR%
IF NOT "%CALIBRE_TEMP_DIR%" == "" ( IF EXIST "%CALIBRE_TEMP_DIR%" RMDIR /s /q "%CALIBRE_TEMP_DIR%"
IF EXIST "%CALIBRE_TEMP_DIR%" RMDIR /s /q "%CALIBRE_TEMP_DIR%" MKDIR "%CALIBRE_TEMP_DIR%"
MKDIR "%CALIBRE_TEMP_DIR%" REM set the following for any components that do
REM set the following for any components that do REM not obey the CALIBRE_TEMP_DIR setting
REM not obey the CALIBRE_TEMP_DIR setting SET TMP=%CALIBRE_TEMP_DIR%
SET TMP=%CALIBRE_TEMP_DIR% SET TEMP=%CALIBRE_TEMP_DIR%
SET TEMP=%CALIBRE_TEMP_DIR%
)
REM --------------------------------------------------------------
REM Set the Interface language (optional)
REM
REM If not set Calibre uses the language set in Preferences
REM --------------------------------------------------------------
SET CALIBRE_OVERRIDE_LANG=EN
ECHO INTERFACE LANGUAGE: %CALIBRE_OVERRIDE_LANG%
REM ---------------------------------------------------------- REM ----------------------------------------------------------
REM The following gives a chance to check the settings before REM The following gives a chance to check the settings before
REM starting Calibre. It can be commented out if not wanted. REM starting Calibre. It can be commented out if not wanted.
REM ---------------------------------------------------------- REM ----------------------------------------------------------
ECHO ' ECHO '
ECHO "Press CTRL-C if you do not want to continue" ECHO Press CTRL-C if you do not want to continue
PAUSE PAUSE
@ -160,11 +197,12 @@ REM responsive while Calibre is running. Within Calibre itself
REM the backgound processes should be set to run with 'low' priority. REM the backgound processes should be set to run with 'low' priority.
REM Using the START command starts up Calibre in a separate process. REM Using the START command starts up Calibre in a separate process.
REM If used without /WAIT opotion launches Calibre and contines batch file. REM If used without /WAIT option it launches Calibre and contines batch file.
REM normally this would simply run off the end and close the Command window.
REM Use with /WAIT to wait until Calibre completes to run a task on exit REM Use with /WAIT to wait until Calibre completes to run a task on exit
REM -------------------------------------------------------- REM --------------------------------------------------------
ECHO "Starting up Calibre" ECHO "Starting up Calibre"
ECHO OFF ECHO OFF
ECHO %cd% ECHO %cd%
START /belownormal Calibre --with-library "%CALIBRE_LIBRARY_DIRECTORY%" START /belownormal Calibre --with-library "%CALIBRE_LIBRARY_DIRECTORY%"

View File

@ -0,0 +1,220 @@
#!/bin/sh
# Calibre-Portable.sh
# ¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬¬
#
# Shell script to start a Calibre configuration on Linux
# giving explicit control of the location of:
#  - Calibre Program Files
#  - Calibre Library Files
#  - Calibre Config Files
#  - Calibre Metadata database
#  - Calibre Source files
#  - Calibre Temp Files
# By setting the paths correctly it can be used to run:
#  - A "portable calibre" off a USB stick.
#  - A network installation with local metadata database
#    (for performance) and books stored on a network share
#  - A local installation using customised settings
#
# If trying to run off a USB stick then the folder structure
# shown below is recommended (relative to the location of
# this script file). This structure can also be used
# when running off a local hard disk if you want to get the
# level of control this script file provides.
#  - Calibre        Location of linux program files
#  - CalibreConfig  Location of Configuration files
#  - CalibreLibrary Location of Books and metadata
#  - CalibreSource  Location of Calibre Source files (Optional)
#
# This script file is designed so that if you create the recommended
# folder structure then it can be used 'as is' without modification.
#
# More information on the Environment Variables used by Calibre can
# be found at:
#       http://manual.calibre-ebook.com/customize.html#environment-variables
#
# The documentation for this file in the Calibre manual can be found at:
#       http://manual.calibre-ebook.com/portable.html
#
# NOTE: It is quite possible to have both Windows and Linux binaries on the same
#       USB stick but set up to use the same calibre settings otherwise.
#       In this case you use:
#       - calibre-portable.bat to run the Windows version
#       - calibre-portable.sh  to run the Linux version
#
# CHANGE HISTORY
# ¬¬¬¬¬¬¬¬¬¬¬¬¬¬
# 22 Jan 2012  itimpi - First version based on the calibre-portable.bat file for Windows
#                       It should have identical functionality but for a linux environment.
#                       It might work on MacOS but that has not been validated


# -------------------------------------
# Set up Calibre Config folder
#
# This is where user specific settings
# are stored.
# -------------------------------------

if [ -d CalibreConfig ]
then
	CALIBRE_CONFIG_DIRECTORY=`pwd`/CalibreConfig
	export CALIBRE_CONFIG_DIRECTORY
	echo "CONFIG FILES:       "`pwd`"/CalibreConfig"
fi


# --------------------------------------------------------------
# Specify Location of ebooks
#
# Location where Book files are located
# Either set an explicit path, or if running from a USB stick
# a relative path can be used to avoid needing to know the
# mount point of the USB stick.
#
# Comment out any of the following that are not to be used
# (although leaving them in does not really matter)
# --------------------------------------------------------------

if [ -d /eBooks/CalibreLibrary ]
then
	# BUG FIX: this branch previously used the Windows batch syntax
	# 'SET VAR=value' (which runs a nonexistent 'SET' command in sh),
	# exported the wrong variable name (LIBRARY_FILES), and used a
	# path whose case (/eBOOKS) did not match the directory tested
	# for above -- paths are case-sensitive on Linux.
	CALIBRE_LIBRARY_DIRECTORY=/eBooks/CalibreLibrary
	export CALIBRE_LIBRARY_DIRECTORY
	echo "LIBRARY FILES:      /eBooks/CalibreLibrary"
fi

if [ -d `pwd`/CalibreLibrary ]
then
	CALIBRE_LIBRARY_DIRECTORY=`pwd`/CalibreLibrary
	# BUG FIX: previously exported the wrong name (LIBRARY_FILES),
	# so calibre never saw the setting.
	export CALIBRE_LIBRARY_DIRECTORY
	echo "LIBRARY FILES:      "`pwd`"/CalibreLibrary"
fi


# --------------------------------------------------------------
# Specify Location of metadata database (optional)
#
# Location where the metadata.db file is located. If not set
# then the same location as the Book files will be assumed. This
# option is typically used to get better performance when the
# Library is on a (slow) network drive. Putting the metadata.db
# file locally then gives a big performance improvement.
#
# NOTE. If you use this option, then the ability to switch
#       libraries within Calibre will be disabled. Therefore
#       you do not want to set it if the metadata.db file
#       is at the same location as the book files.
#
#       Another point to watch is that plugins can cause problems
#       as they often store absolute path information
# --------------------------------------------------------------

# BUG FIX: metadata.db is a file, so test with -f (not -d); the
# comparison is quoted so the test does not break when
# CALIBRE_LIBRARY_DIRECTORY is unset or contains spaces.
if [ -f `pwd`/CalibreMetadata/metadata.db ]
then
	if [ "$CALIBRE_LIBRARY_DIRECTORY" != "`pwd`/CalibreMetadata" ]
	then
		CALIBRE_OVERRIDE_DATABASE_PATH=`pwd`/CalibreMetadata/metadata.db
		# BUG FIX: previously exported CALIBRE_OVERRIDE_DATABASE,
		# which is not the variable calibre reads.
		export CALIBRE_OVERRIDE_DATABASE_PATH
		echo "DATABASE:           "`pwd`"/CalibreMetadata/metadata.db"
		echo
		echo "***CAUTION*** Library Switching will be disabled"
		echo
	fi
fi


# --------------------------------------------------------------
# Specify Location of source (optional)
#
# It is easy to run Calibre from source
# Just set the environment variable to where the source is located
# When running from source the GUI will have a '*' after the version
# number that is displayed at the bottom of the Calibre main screen.
#
# More information on setting up a development environment can
# be found at:
#       http://manual.calibre-ebook.com/develop.html#develop
# --------------------------------------------------------------

if [ -d CalibreSource/src ]
then
	CALIBRE_DEVELOP_FROM=`pwd`/CalibreSource/src
	export CALIBRE_DEVELOP_FROM
	echo "SOURCE FILES:       "`pwd`"/CalibreSource/src"
else
	echo "SOURCE FILES:       *** Not being Used ***"
fi


# --------------------------------------------------------------
# Specify Location of calibre linux binaries (optional)
#
# To avoid needing Calibre to be set in the search path, ensure
# that the Calibre Program Files is the current directory when
# starting. The following test falls back to using the search path.
#
# This folder can be populated by copying the /opt/calibre folder
# from an existing installation or by installing direct to here.
#
# NOTE. Do not try and put both Windows and Linux binaries into
#       the same folder as this can cause problems.
# --------------------------------------------------------------

if [ -d `pwd`/Calibre ]
then
	cd `pwd`/Calibre
	echo "PROGRAM FILES:      "`pwd`
else
	echo "PROGRAM FILES:      *** Using System search path ***"
fi


# --------------------------------------------------------------
# Location of Calibre Temporary files (optional)
#
# Calibre creates a lot of temporary files while running
# In theory these are removed when Calibre finishes, but
# in practise files can be left behind (particularly if
# a crash occurs). Using this option allows some
# explicit clean-up of these files.
# If not set Calibre uses the normal system TEMP location
# --------------------------------------------------------------

CALIBRE_TEMP_DIR=/tmp/CALIBRE_TEMP
# BUG FIX: the variable must be exported for the calibre child
# process to see it; previously it was only set in this shell.
export CALIBRE_TEMP_DIR
echo "TEMPORARY FILES:    $CALIBRE_TEMP_DIR"

if [ -d "$CALIBRE_TEMP_DIR" ]
then
	rm -fr "$CALIBRE_TEMP_DIR"
fi
mkdir "$CALIBRE_TEMP_DIR"
# set the following for any components that do
# not obey the CALIBRE_TEMP_DIR setting
TMPDIR=$CALIBRE_TEMP_DIR
export TMPDIR


# --------------------------------------------------------------
# Set the Interface language (optional)
#
# If not set Calibre uses the language set in Preferences
# --------------------------------------------------------------

CALIBRE_OVERRIDE_LANG=EN
export CALIBRE_OVERRIDE_LANG
echo "INTERFACE LANGUAGE: $CALIBRE_OVERRIDE_LANG"


# ----------------------------------------------------------
# The following gives a chance to check the settings before
# starting Calibre. It can be commented out if not wanted.
# ----------------------------------------------------------

echo
echo "Press CTRL-C if you do not want to continue"
echo "Press ENTER to continue and start Calibre"
read DUMMY


# --------------------------------------------------------
# Start up the calibre program.
# --------------------------------------------------------

echo "Starting up Calibre"
echo `pwd`
calibre --with-library "$CALIBRE_LIBRARY_DIRECTORY"

View File

@ -128,6 +128,17 @@ categories_collapsed_name_template = r'{first.sort:shorten(4,,0)} - {last.sort:s
categories_collapsed_rating_template = r'{first.avg_rating:4.2f:ifempty(0)} - {last.avg_rating:4.2f:ifempty(0)}' categories_collapsed_rating_template = r'{first.avg_rating:4.2f:ifempty(0)} - {last.avg_rating:4.2f:ifempty(0)}'
categories_collapsed_popularity_template = r'{first.count:d} - {last.count:d}' categories_collapsed_popularity_template = r'{first.count:d} - {last.count:d}'
#: Control order of categories in the tag browser
# Change the following dict to change the order that categories are displayed in
# the tag browser. Items are named using their lookup name, and will be sorted
# using the number supplied. The lookup name '*' stands for all names that
# otherwise do not appear. Two names with the same value will be sorted
# using the default order; the one used when the dict is empty.
# Example: tag_browser_category_order = {'series':1, 'tags':2, '*':3}
# resulting in the order series, tags, then everything else in default order.
tag_browser_category_order = {'*':1}
#: Specify columns to sort the booklist by on startup #: Specify columns to sort the booklist by on startup
# Provide a set of columns to be sorted on when calibre starts # Provide a set of columns to be sorted on when calibre starts
# The argument is None if saved sort history is to be used # The argument is None if saved sort history is to be used
@ -374,10 +385,11 @@ maximum_resort_levels = 5
# the fields that are being displayed. # the fields that are being displayed.
sort_dates_using_visible_fields = False sort_dates_using_visible_fields = False
#: Specify which font to use when generating a default cover #: Specify which font to use when generating a default cover or masthead
# Absolute path to .ttf font files to use as the fonts for the title, author # Absolute path to .ttf font files to use as the fonts for the title, author
# and footer when generating a default cover. Useful if the default font (Liberation # and footer when generating a default cover or masthead image. Useful if the
# Serif) does not contain glyphs for the language of the books in your library. # default font (Liberation Serif) does not contain glyphs for the language of
# the books in your library.
generate_cover_title_font = None generate_cover_title_font = None
generate_cover_foot_font = None generate_cover_foot_font = None
@ -484,3 +496,19 @@ gui_view_history_size = 15
# prefer HTMLZ to EPUB for tweaking, change this to 'htmlz' # prefer HTMLZ to EPUB for tweaking, change this to 'htmlz'
tweak_book_prefer = 'epub' tweak_book_prefer = 'epub'
#: Change the font size of book details in the interface
# Change the font size at which book details are rendered in the side panel and
# comments are rendered in the metadata edit dialog. Set it to a positive or
# negative number to increase or decrease the font size.
change_book_details_font_size_by = 0
#: Compile General Program Mode templates to Python
# Compiled general program mode templates are significantly faster than
# interpreted templates. Setting this tweak to True causes calibre to compile
# (in most cases) general program mode templates. Setting it to False causes
# calibre to use the old behavior -- interpreting the templates. Set the tweak
# to False if some compiled templates produce incorrect values.
# Default: compile_gpm_templates = True
# No compile: compile_gpm_templates = False
compile_gpm_templates = True

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

After

Width:  |  Height:  |  Size: 85 KiB

View File

@ -14,7 +14,7 @@ from setup.build_environment import msvc, MT, RC
from setup.installer.windows.wix import WixMixIn from setup.installer.windows.wix import WixMixIn
OPENSSL_DIR = r'Q:\openssl' OPENSSL_DIR = r'Q:\openssl'
QT_DIR = 'Q:\\Qt\\4.7.3' QT_DIR = 'Q:\\Qt\\4.8.0'
QT_DLLS = ['Core', 'Gui', 'Network', 'Svg', 'WebKit', 'Xml', 'XmlPatterns'] QT_DLLS = ['Core', 'Gui', 'Network', 'Svg', 'WebKit', 'Xml', 'XmlPatterns']
LIBUNRAR = 'C:\\Program Files\\UnrarDLL\\unrar.dll' LIBUNRAR = 'C:\\Program Files\\UnrarDLL\\unrar.dll'
SW = r'C:\cygwin\home\kovid\sw' SW = r'C:\cygwin\home\kovid\sw'

View File

@ -97,7 +97,9 @@ Now, run configure and make::
-no-plugin-manifests is needed so that loading the plugins does not fail looking for the CRT assembly -no-plugin-manifests is needed so that loading the plugins does not fail looking for the CRT assembly
configure -opensource -release -qt-zlib -qt-gif -qt-libmng -qt-libpng -qt-libtiff -qt-libjpeg -release -platform win32-msvc2008 -no-qt3support -webkit -xmlpatterns -no-phonon -no-style-plastique -no-style-cleanlooks -no-style-motif -no-style-cde -no-declarative -no-scripttools -no-audio-backend -no-multimedia -no-dbus -no-openvg -no-opengl -no-qt3support -confirm-license -nomake examples -nomake demos -nomake docs -no-plugin-manifests -openssl -I Q:\openssl\include -L Q:\openssl\lib && nmake configure -opensource -release -qt-zlib -qt-libmng -qt-libpng -qt-libtiff -qt-libjpeg -release -platform win32-msvc2008 -no-qt3support -webkit -xmlpatterns -no-phonon -no-style-plastique -no-style-cleanlooks -no-style-motif -no-style-cde -no-declarative -no-scripttools -no-audio-backend -no-multimedia -no-dbus -no-openvg -no-opengl -no-qt3support -confirm-license -nomake examples -nomake demos -nomake docs -no-plugin-manifests -openssl -I Q:\openssl\include -L Q:\openssl\lib && nmake
Add the path to the bin folder inside the Qt dir to your system PATH.
SIP SIP
----- -----

View File

@ -53,7 +53,7 @@ void show_last_error(LPCTSTR preamble) {
NULL, NULL,
dw, dw,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
&msg, (LPTSTR)&msg,
0, NULL ); 0, NULL );
show_detailed_error(preamble, msg, (int)dw); show_detailed_error(preamble, msg, (int)dw);
@ -136,7 +136,7 @@ void launch_calibre(LPCTSTR exe, LPCTSTR config_dir, LPCTSTR library_dir) {
int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PWSTR pCmdLine, int nCmdShow) int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PWSTR pCmdLine, int nCmdShow)
{ {
LPTSTR app_dir, config_dir, exe, library_dir; LPTSTR app_dir, config_dir, exe, library_dir, too_long;
app_dir = get_app_dir(); app_dir = get_app_dir();
config_dir = (LPTSTR)calloc(BUFSIZE, sizeof(TCHAR)); config_dir = (LPTSTR)calloc(BUFSIZE, sizeof(TCHAR));
@ -147,7 +147,15 @@ int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PWSTR pCmdLine
_sntprintf_s(exe, BUFSIZE, _TRUNCATE, _T("%sCalibre\\calibre.exe"), app_dir); _sntprintf_s(exe, BUFSIZE, _TRUNCATE, _T("%sCalibre\\calibre.exe"), app_dir);
_sntprintf_s(library_dir, BUFSIZE, _TRUNCATE, _T("%sCalibre Library"), app_dir); _sntprintf_s(library_dir, BUFSIZE, _TRUNCATE, _T("%sCalibre Library"), app_dir);
launch_calibre(exe, config_dir, library_dir); if ( _tcscnlen(library_dir, BUFSIZE) <= 74 ) {
launch_calibre(exe, config_dir, library_dir);
} else {
too_long = (LPTSTR)calloc(BUFSIZE+300, sizeof(TCHAR));
_sntprintf_s(too_long, BUFSIZE+300, _TRUNCATE,
_T("Path to Calibre Portable (%s) too long. Must be less than 59 characters."), app_dir);
show_error(too_long);
}
free(app_dir); free(config_dir); free(exe); free(library_dir); free(app_dir); free(config_dir); free(exe); free(library_dir);

View File

@ -18,14 +18,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-" "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n" "devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n" "POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2012-01-14 02:30+0000\n" "PO-Revision-Date: 2012-03-05 19:08+0000\n"
"Last-Translator: Wolfgang Rohdewald <wolfgang@rohdewald.de>\n" "Last-Translator: Dennis Baudys <Unknown>\n"
"Language-Team: German <debian-l10n-german@lists.debian.org>\n" "Language-Team: German <debian-l10n-german@lists.debian.org>\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2012-01-15 05:18+0000\n" "X-Launchpad-Export-Date: 2012-03-06 04:47+0000\n"
"X-Generator: Launchpad (build 14664)\n" "X-Generator: Launchpad (build 14900)\n"
"Language: de\n" "Language: de\n"
#. name for aaa #. name for aaa
@ -5871,7 +5871,7 @@ msgstr ""
#. name for cym #. name for cym
msgid "Welsh" msgid "Welsh"
msgstr "Kymrisch" msgstr "Walisisch"
#. name for cyo #. name for cyo
msgid "Cuyonon" msgid "Cuyonon"

File diff suppressed because it is too large Load Diff

View File

@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: calibre\n" "Project-Id-Version: calibre\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n" "POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2011-12-17 09:29+0000\n" "PO-Revision-Date: 2012-03-11 10:13+0000\n"
"Last-Translator: Jellby <Unknown>\n" "Last-Translator: Jellby <Unknown>\n"
"Language-Team: Spanish <es@li.org>\n" "Language-Team: Spanish <es@li.org>\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-12-18 04:37+0000\n" "X-Launchpad-Export-Date: 2012-03-12 04:38+0000\n"
"X-Generator: Launchpad (build 14525)\n" "X-Generator: Launchpad (build 14933)\n"
#. name for aaa #. name for aaa
msgid "Ghotuo" msgid "Ghotuo"
@ -1779,7 +1779,7 @@ msgstr "Awiyaana"
#. name for auz #. name for auz
msgid "Arabic; Uzbeki" msgid "Arabic; Uzbeki"
msgstr "Árabe uzbeco" msgstr "Árabe uzbeko"
#. name for ava #. name for ava
msgid "Avaric" msgid "Avaric"
@ -22207,7 +22207,7 @@ msgstr "Roglai septentrional"
#. name for roh #. name for roh
msgid "Romansh" msgid "Romansh"
msgstr "" msgstr "Romanche"
#. name for rol #. name for rol
msgid "Romblomanon" msgid "Romblomanon"
@ -22607,7 +22607,7 @@ msgstr ""
#. name for sci #. name for sci
msgid "Creole Malay; Sri Lankan" msgid "Creole Malay; Sri Lankan"
msgstr "Malo criollo de Sri Lanka" msgstr "Malayo criollo de Sri Lanka"
#. name for sck #. name for sck
msgid "Sadri" msgid "Sadri"
@ -26987,15 +26987,15 @@ msgstr ""
#. name for uzb #. name for uzb
msgid "Uzbek" msgid "Uzbek"
msgstr "Uzbeco" msgstr "Uzbeko"
#. name for uzn #. name for uzn
msgid "Uzbek; Northern" msgid "Uzbek; Northern"
msgstr "Uzbeco septentrional" msgstr "Uzbeko septentrional"
#. name for uzs #. name for uzs
msgid "Uzbek; Southern" msgid "Uzbek; Southern"
msgstr "Uzbeco meridional" msgstr "Uzbeko meridional"
#. name for vaa #. name for vaa
msgid "Vaagri Booli" msgid "Vaagri Booli"
@ -30319,7 +30319,7 @@ msgstr ""
#. name for zhn #. name for zhn
msgid "Zhuang; Nong" msgid "Zhuang; Nong"
msgstr "Zhuang nong" msgstr "Chuang nong"
#. name for zho #. name for zho
msgid "Chinese" msgid "Chinese"

View File

@ -9,67 +9,67 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-" "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n" "devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n" "POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2011-09-27 15:37+0000\n" "PO-Revision-Date: 2012-03-06 13:55+0000\n"
"Last-Translator: Piarres Beobide <pi@beobide.net>\n" "Last-Translator: Asier Iturralde Sarasola <Unknown>\n"
"Language-Team: Euskara <itzulpena@comtropos.com>\n" "Language-Team: Euskara <itzulpena@comtropos.com>\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-26 05:07+0000\n" "X-Launchpad-Export-Date: 2012-03-07 05:12+0000\n"
"X-Generator: Launchpad (build 14381)\n" "X-Generator: Launchpad (build 14907)\n"
"Language: eu\n" "Language: eu\n"
#. name for aaa #. name for aaa
msgid "Ghotuo" msgid "Ghotuo"
msgstr "" msgstr "Ghotuo"
#. name for aab #. name for aab
msgid "Alumu-Tesu" msgid "Alumu-Tesu"
msgstr "" msgstr "Alumu-Tesu"
#. name for aac #. name for aac
msgid "Ari" msgid "Ari"
msgstr "" msgstr "Ari"
#. name for aad #. name for aad
msgid "Amal" msgid "Amal"
msgstr "" msgstr "Amal"
#. name for aae #. name for aae
msgid "Albanian; Arbëreshë" msgid "Albanian; Arbëreshë"
msgstr "" msgstr "Albaniera; Arbëreshë"
#. name for aaf #. name for aaf
msgid "Aranadan" msgid "Aranadan"
msgstr "" msgstr "Aranadan"
#. name for aag #. name for aag
msgid "Ambrak" msgid "Ambrak"
msgstr "" msgstr "Ambrak"
#. name for aah #. name for aah
msgid "Arapesh; Abu'" msgid "Arapesh; Abu'"
msgstr "" msgstr "Arapesh; Abu'"
#. name for aai #. name for aai
msgid "Arifama-Miniafia" msgid "Arifama-Miniafia"
msgstr "" msgstr "Arifama-Miniafia"
#. name for aak #. name for aak
msgid "Ankave" msgid "Ankave"
msgstr "" msgstr "Ankave"
#. name for aal #. name for aal
msgid "Afade" msgid "Afade"
msgstr "" msgstr "Afade"
#. name for aam #. name for aam
msgid "Aramanik" msgid "Aramanik"
msgstr "" msgstr "Aramanik"
#. name for aan #. name for aan
msgid "Anambé" msgid "Anambé"
msgstr "" msgstr "Anambé"
#. name for aao #. name for aao
msgid "Arabic; Algerian Saharan" msgid "Arabic; Algerian Saharan"
@ -77,107 +77,107 @@ msgstr ""
#. name for aap #. name for aap
msgid "Arára; Pará" msgid "Arára; Pará"
msgstr "" msgstr "Arára; Pará"
#. name for aaq #. name for aaq
msgid "Abnaki; Eastern" msgid "Abnaki; Eastern"
msgstr "" msgstr "Abnaki; Ekialdekoa"
#. name for aar #. name for aar
msgid "Afar" msgid "Afar"
msgstr "" msgstr "Afarera"
#. name for aas #. name for aas
msgid "Aasáx" msgid "Aasáx"
msgstr "" msgstr "Aasáx"
#. name for aat #. name for aat
msgid "Albanian; Arvanitika" msgid "Albanian; Arvanitika"
msgstr "" msgstr "Albaniera; Arvanitika"
#. name for aau #. name for aau
msgid "Abau" msgid "Abau"
msgstr "" msgstr "Abau"
#. name for aaw #. name for aaw
msgid "Solong" msgid "Solong"
msgstr "" msgstr "Solong"
#. name for aax #. name for aax
msgid "Mandobo Atas" msgid "Mandobo Atas"
msgstr "" msgstr "Mandobo Atas"
#. name for aaz #. name for aaz
msgid "Amarasi" msgid "Amarasi"
msgstr "" msgstr "Amarasi"
#. name for aba #. name for aba
msgid "Abé" msgid "Abé"
msgstr "" msgstr "Abé"
#. name for abb #. name for abb
msgid "Bankon" msgid "Bankon"
msgstr "" msgstr "Bankon"
#. name for abc #. name for abc
msgid "Ayta; Ambala" msgid "Ayta; Ambala"
msgstr "" msgstr "Ayta; Ambala"
#. name for abd #. name for abd
msgid "Manide" msgid "Manide"
msgstr "" msgstr "Manide"
#. name for abe #. name for abe
msgid "Abnaki; Western" msgid "Abnaki; Western"
msgstr "" msgstr "Abnaki; Mendebaldekoa"
#. name for abf #. name for abf
msgid "Abai Sungai" msgid "Abai Sungai"
msgstr "" msgstr "Abai Sungai"
#. name for abg #. name for abg
msgid "Abaga" msgid "Abaga"
msgstr "" msgstr "Abaga"
#. name for abh #. name for abh
msgid "Arabic; Tajiki" msgid "Arabic; Tajiki"
msgstr "" msgstr "Arabiera; Tajiki"
#. name for abi #. name for abi
msgid "Abidji" msgid "Abidji"
msgstr "" msgstr "Abidji"
#. name for abj #. name for abj
msgid "Aka-Bea" msgid "Aka-Bea"
msgstr "" msgstr "Aka-Bea"
#. name for abk #. name for abk
msgid "Abkhazian" msgid "Abkhazian"
msgstr "" msgstr "Abkhazera"
#. name for abl #. name for abl
msgid "Lampung Nyo" msgid "Lampung Nyo"
msgstr "" msgstr "Lampung Nyo"
#. name for abm #. name for abm
msgid "Abanyom" msgid "Abanyom"
msgstr "" msgstr "Abanyom"
#. name for abn #. name for abn
msgid "Abua" msgid "Abua"
msgstr "" msgstr "Abua"
#. name for abo #. name for abo
msgid "Abon" msgid "Abon"
msgstr "" msgstr "Abon"
#. name for abp #. name for abp
msgid "Ayta; Abellen" msgid "Ayta; Abellen"
msgstr "" msgstr "Ayta; Abellen"
#. name for abq #. name for abq
msgid "Abaza" msgid "Abaza"
msgstr "" msgstr "Abazera"
#. name for abr #. name for abr
msgid "Abron" msgid "Abron"

File diff suppressed because it is too large Load Diff

View File

@ -8,119 +8,119 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-" "Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n" "devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-11-25 14:01+0000\n" "POT-Creation-Date: 2011-11-25 14:01+0000\n"
"PO-Revision-Date: 2011-09-27 15:42+0000\n" "PO-Revision-Date: 2012-03-14 21:30+0000\n"
"Last-Translator: Kovid Goyal <Unknown>\n" "Last-Translator: Иван Старчевић <ivanstar61@gmail.com>\n"
"Language-Team: Serbian <gnu@prevod.org>\n" "Language-Team: Serbian <gnu@prevod.org>\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-11-26 05:36+0000\n" "X-Launchpad-Export-Date: 2012-03-15 04:45+0000\n"
"X-Generator: Launchpad (build 14381)\n" "X-Generator: Launchpad (build 14933)\n"
"Language: sr\n" "Language: sr\n"
#. name for aaa #. name for aaa
msgid "Ghotuo" msgid "Ghotuo"
msgstr "" msgstr "Готуо"
#. name for aab #. name for aab
msgid "Alumu-Tesu" msgid "Alumu-Tesu"
msgstr "" msgstr "Алуму-Тесу"
#. name for aac #. name for aac
msgid "Ari" msgid "Ari"
msgstr "" msgstr "Ари"
#. name for aad #. name for aad
msgid "Amal" msgid "Amal"
msgstr "" msgstr "Амал"
#. name for aae #. name for aae
msgid "Albanian; Arbëreshë" msgid "Albanian; Arbëreshë"
msgstr "" msgstr "Албански; Арбереше"
#. name for aaf #. name for aaf
msgid "Aranadan" msgid "Aranadan"
msgstr "" msgstr "Аранадан"
#. name for aag #. name for aag
msgid "Ambrak" msgid "Ambrak"
msgstr "" msgstr "Амбрак"
#. name for aah #. name for aah
msgid "Arapesh; Abu'" msgid "Arapesh; Abu'"
msgstr "" msgstr "Арабеш; Абу'"
#. name for aai #. name for aai
msgid "Arifama-Miniafia" msgid "Arifama-Miniafia"
msgstr "" msgstr "Арифама-Миниафиа"
#. name for aak #. name for aak
msgid "Ankave" msgid "Ankave"
msgstr "" msgstr "Анкаве"
#. name for aal #. name for aal
msgid "Afade" msgid "Afade"
msgstr "" msgstr "Афаде"
#. name for aam #. name for aam
msgid "Aramanik" msgid "Aramanik"
msgstr "" msgstr "Араманик"
#. name for aan #. name for aan
msgid "Anambé" msgid "Anambé"
msgstr "" msgstr "Анамбе"
#. name for aao #. name for aao
msgid "Arabic; Algerian Saharan" msgid "Arabic; Algerian Saharan"
msgstr "" msgstr "Арапски; Алжирска Сахара"
#. name for aap #. name for aap
msgid "Arára; Pará" msgid "Arára; Pará"
msgstr "" msgstr "Арара;Пара"
#. name for aaq #. name for aaq
msgid "Abnaki; Eastern" msgid "Abnaki; Eastern"
msgstr "" msgstr "Абнаки;Источни"
#. name for aar #. name for aar
msgid "Afar" msgid "Afar"
msgstr "афар" msgstr "Афар"
#. name for aas #. name for aas
msgid "Aasáx" msgid "Aasáx"
msgstr "" msgstr "Асакс"
#. name for aat #. name for aat
msgid "Albanian; Arvanitika" msgid "Albanian; Arvanitika"
msgstr "" msgstr "Албански (арванитска)"
#. name for aau #. name for aau
msgid "Abau" msgid "Abau"
msgstr "" msgstr "Абау"
#. name for aaw #. name for aaw
msgid "Solong" msgid "Solong"
msgstr "" msgstr "Солонг"
#. name for aax #. name for aax
msgid "Mandobo Atas" msgid "Mandobo Atas"
msgstr "" msgstr "Мандобо Атас"
#. name for aaz #. name for aaz
msgid "Amarasi" msgid "Amarasi"
msgstr "" msgstr "Амараси"
#. name for aba #. name for aba
msgid "Abé" msgid "Abé"
msgstr "" msgstr "Абе"
#. name for abb #. name for abb
msgid "Bankon" msgid "Bankon"
msgstr "" msgstr "Банкон"
#. name for abc #. name for abc
msgid "Ayta; Ambala" msgid "Ayta; Ambala"
msgstr "" msgstr "Аита;Амбала"
#. name for abd #. name for abd
msgid "Manide" msgid "Manide"
@ -128,235 +128,235 @@ msgstr ""
#. name for abe #. name for abe
msgid "Abnaki; Western" msgid "Abnaki; Western"
msgstr "" msgstr "Абнаки; Западни"
#. name for abf #. name for abf
msgid "Abai Sungai" msgid "Abai Sungai"
msgstr "" msgstr "Абаи Сунгаи"
#. name for abg #. name for abg
msgid "Abaga" msgid "Abaga"
msgstr "" msgstr "Абага"
#. name for abh #. name for abh
msgid "Arabic; Tajiki" msgid "Arabic; Tajiki"
msgstr "" msgstr "Арапски; Таџики"
#. name for abi #. name for abi
msgid "Abidji" msgid "Abidji"
msgstr "" msgstr "Абиџи"
#. name for abj #. name for abj
msgid "Aka-Bea" msgid "Aka-Bea"
msgstr "" msgstr "Ака-Беа"
#. name for abk #. name for abk
msgid "Abkhazian" msgid "Abkhazian"
msgstr "абкаски" msgstr "Абхазијски"
#. name for abl #. name for abl
msgid "Lampung Nyo" msgid "Lampung Nyo"
msgstr "" msgstr "Лампунг Нио"
#. name for abm #. name for abm
msgid "Abanyom" msgid "Abanyom"
msgstr "" msgstr "Абањјом"
#. name for abn #. name for abn
msgid "Abua" msgid "Abua"
msgstr "" msgstr "Абуа"
#. name for abo #. name for abo
msgid "Abon" msgid "Abon"
msgstr "" msgstr "Абон"
#. name for abp #. name for abp
msgid "Ayta; Abellen" msgid "Ayta; Abellen"
msgstr "" msgstr "Ајта (абелијска)"
#. name for abq #. name for abq
msgid "Abaza" msgid "Abaza"
msgstr "" msgstr "Абаза"
#. name for abr #. name for abr
msgid "Abron" msgid "Abron"
msgstr "" msgstr "Аброн"
#. name for abs #. name for abs
msgid "Malay; Ambonese" msgid "Malay; Ambonese"
msgstr "" msgstr "Малајски; Амбонијски"
#. name for abt #. name for abt
msgid "Ambulas" msgid "Ambulas"
msgstr "" msgstr "Амбулас"
#. name for abu #. name for abu
msgid "Abure" msgid "Abure"
msgstr "" msgstr "Абуре"
#. name for abv #. name for abv
msgid "Arabic; Baharna" msgid "Arabic; Baharna"
msgstr "" msgstr "Арапски (Бахреин)"
#. name for abw #. name for abw
msgid "Pal" msgid "Pal"
msgstr "" msgstr "Пал"
#. name for abx #. name for abx
msgid "Inabaknon" msgid "Inabaknon"
msgstr "" msgstr "Инабакнон"
#. name for aby #. name for aby
msgid "Aneme Wake" msgid "Aneme Wake"
msgstr "" msgstr "Анем Ваке"
#. name for abz #. name for abz
msgid "Abui" msgid "Abui"
msgstr "" msgstr "Абуи"
#. name for aca #. name for aca
msgid "Achagua" msgid "Achagua"
msgstr "" msgstr "Ачагуа"
#. name for acb #. name for acb
msgid "Áncá" msgid "Áncá"
msgstr "" msgstr "Анка"
#. name for acd #. name for acd
msgid "Gikyode" msgid "Gikyode"
msgstr "" msgstr "Гикиод"
#. name for ace #. name for ace
msgid "Achinese" msgid "Achinese"
msgstr "акинески" msgstr "Акинески"
#. name for acf #. name for acf
msgid "Creole French; Saint Lucian" msgid "Creole French; Saint Lucian"
msgstr "" msgstr "Креолски француски; Сент Лусија"
#. name for ach #. name for ach
msgid "Acoli" msgid "Acoli"
msgstr "аколи" msgstr "Аколи"
#. name for aci #. name for aci
msgid "Aka-Cari" msgid "Aka-Cari"
msgstr "" msgstr "Ака-Кари"
#. name for ack #. name for ack
msgid "Aka-Kora" msgid "Aka-Kora"
msgstr "" msgstr "Ака-Кора"
#. name for acl #. name for acl
msgid "Akar-Bale" msgid "Akar-Bale"
msgstr "" msgstr "Акар-Бале"
#. name for acm #. name for acm
msgid "Arabic; Mesopotamian" msgid "Arabic; Mesopotamian"
msgstr "" msgstr "Арапски (Месопотамија)"
#. name for acn #. name for acn
msgid "Achang" msgid "Achang"
msgstr "" msgstr "Ачанг"
#. name for acp #. name for acp
msgid "Acipa; Eastern" msgid "Acipa; Eastern"
msgstr "" msgstr "Акипа;Источни"
#. name for acq #. name for acq
msgid "Arabic; Ta'izzi-Adeni" msgid "Arabic; Ta'izzi-Adeni"
msgstr "" msgstr "Арапски; Северни Јемен"
#. name for acr #. name for acr
msgid "Achi" msgid "Achi"
msgstr "" msgstr "Ачи"
#. name for acs #. name for acs
msgid "Acroá" msgid "Acroá"
msgstr "" msgstr "Акроа"
#. name for act #. name for act
msgid "Achterhoeks" msgid "Achterhoeks"
msgstr "" msgstr "Ахтерхекс"
#. name for acu #. name for acu
msgid "Achuar-Shiwiar" msgid "Achuar-Shiwiar"
msgstr "" msgstr "Ачуар-Шивиар"
#. name for acv #. name for acv
msgid "Achumawi" msgid "Achumawi"
msgstr "" msgstr "Ачумави"
#. name for acw #. name for acw
msgid "Arabic; Hijazi" msgid "Arabic; Hijazi"
msgstr "" msgstr "Арапски;Хиџази"
#. name for acx #. name for acx
msgid "Arabic; Omani" msgid "Arabic; Omani"
msgstr "" msgstr "Арапски;Оман"
#. name for acy #. name for acy
msgid "Arabic; Cypriot" msgid "Arabic; Cypriot"
msgstr "" msgstr "Арапски;Кипар"
#. name for acz #. name for acz
msgid "Acheron" msgid "Acheron"
msgstr "" msgstr "Ачерон"
#. name for ada #. name for ada
msgid "Adangme" msgid "Adangme"
msgstr "адангме" msgstr "Адангме"
#. name for adb #. name for adb
msgid "Adabe" msgid "Adabe"
msgstr "" msgstr "Адабе"
#. name for add #. name for add
msgid "Dzodinka" msgid "Dzodinka"
msgstr "" msgstr "Ђодинка"
#. name for ade #. name for ade
msgid "Adele" msgid "Adele"
msgstr "" msgstr "Аделе"
#. name for adf #. name for adf
msgid "Arabic; Dhofari" msgid "Arabic; Dhofari"
msgstr "" msgstr "Арапски;Дофари"
#. name for adg #. name for adg
msgid "Andegerebinha" msgid "Andegerebinha"
msgstr "" msgstr "Андегеребина"
#. name for adh #. name for adh
msgid "Adhola" msgid "Adhola"
msgstr "" msgstr "Адола"
#. name for adi #. name for adi
msgid "Adi" msgid "Adi"
msgstr "" msgstr "Ади"
#. name for adj #. name for adj
msgid "Adioukrou" msgid "Adioukrou"
msgstr "" msgstr "Адиокру"
#. name for adl #. name for adl
msgid "Galo" msgid "Galo"
msgstr "" msgstr "Гало"
#. name for adn #. name for adn
msgid "Adang" msgid "Adang"
msgstr "" msgstr "Аданг"
#. name for ado #. name for ado
msgid "Abu" msgid "Abu"
msgstr "" msgstr "Абу"
#. name for adp #. name for adp
msgid "Adap" msgid "Adap"
msgstr "" msgstr "Адап"
#. name for adq #. name for adq
msgid "Adangbe" msgid "Adangbe"
msgstr "" msgstr "Адангбе"
#. name for adr #. name for adr
msgid "Adonara" msgid "Adonara"
@ -364,59 +364,59 @@ msgstr ""
#. name for ads #. name for ads
msgid "Adamorobe Sign Language" msgid "Adamorobe Sign Language"
msgstr "" msgstr "Адамороб знаковни језик"
#. name for adt #. name for adt
msgid "Adnyamathanha" msgid "Adnyamathanha"
msgstr "" msgstr "Адњаматана"
#. name for adu #. name for adu
msgid "Aduge" msgid "Aduge"
msgstr "" msgstr "Адуге"
#. name for adw #. name for adw
msgid "Amundava" msgid "Amundava"
msgstr "" msgstr "Амундава"
#. name for adx #. name for adx
msgid "Tibetan; Amdo" msgid "Tibetan; Amdo"
msgstr "" msgstr "Тибетански;Амдо"
#. name for ady #. name for ady
msgid "Adyghe" msgid "Adyghe"
msgstr "" msgstr "Адиге"
#. name for adz #. name for adz
msgid "Adzera" msgid "Adzera"
msgstr "" msgstr "Адзера"
#. name for aea #. name for aea
msgid "Areba" msgid "Areba"
msgstr "" msgstr "Ареба"
#. name for aeb #. name for aeb
msgid "Arabic; Tunisian" msgid "Arabic; Tunisian"
msgstr "" msgstr "Арапски;Туниски"
#. name for aec #. name for aec
msgid "Arabic; Saidi" msgid "Arabic; Saidi"
msgstr "" msgstr "Арапски (Горњи Египат)"
#. name for aed #. name for aed
msgid "Argentine Sign Language" msgid "Argentine Sign Language"
msgstr "" msgstr "Аргентински знаковни језик"
#. name for aee #. name for aee
msgid "Pashayi; Northeast" msgid "Pashayi; Northeast"
msgstr "" msgstr "Пашаи (североисточни)"
#. name for aek #. name for aek
msgid "Haeke" msgid "Haeke"
msgstr "" msgstr "Хаеке"
#. name for ael #. name for ael
msgid "Ambele" msgid "Ambele"
msgstr "" msgstr "Амбеле"
#. name for aem #. name for aem
msgid "Arem" msgid "Arem"
@ -460,15 +460,15 @@ msgstr ""
#. name for afd #. name for afd
msgid "Andai" msgid "Andai"
msgstr "" msgstr "Андаи"
#. name for afe #. name for afe
msgid "Putukwam" msgid "Putukwam"
msgstr "" msgstr "Путуквам"
#. name for afg #. name for afg
msgid "Afghan Sign Language" msgid "Afghan Sign Language"
msgstr "" msgstr "Афганистански знаковни језик"
#. name for afh #. name for afh
msgid "Afrihili" msgid "Afrihili"
@ -476,7 +476,7 @@ msgstr "африхили"
#. name for afi #. name for afi
msgid "Akrukay" msgid "Akrukay"
msgstr "" msgstr "Акрукај"
#. name for afk #. name for afk
msgid "Nanubae" msgid "Nanubae"
@ -484,15 +484,15 @@ msgstr ""
#. name for afn #. name for afn
msgid "Defaka" msgid "Defaka"
msgstr "" msgstr "Дефака"
#. name for afo #. name for afo
msgid "Eloyi" msgid "Eloyi"
msgstr "" msgstr "Елоји"
#. name for afp #. name for afp
msgid "Tapei" msgid "Tapei"
msgstr "" msgstr "Тапеи"
#. name for afr #. name for afr
msgid "Afrikaans" msgid "Afrikaans"
@ -500,51 +500,51 @@ msgstr "африканс"
#. name for afs #. name for afs
msgid "Creole; Afro-Seminole" msgid "Creole; Afro-Seminole"
msgstr "" msgstr "Креолски;Афричко-Семинолслки"
#. name for aft #. name for aft
msgid "Afitti" msgid "Afitti"
msgstr "" msgstr "Афити"
#. name for afu #. name for afu
msgid "Awutu" msgid "Awutu"
msgstr "" msgstr "Авуту"
#. name for afz #. name for afz
msgid "Obokuitai" msgid "Obokuitai"
msgstr "" msgstr "Обокуитаи"
#. name for aga #. name for aga
msgid "Aguano" msgid "Aguano"
msgstr "" msgstr "Агвано"
#. name for agb #. name for agb
msgid "Legbo" msgid "Legbo"
msgstr "" msgstr "Легбо"
#. name for agc #. name for agc
msgid "Agatu" msgid "Agatu"
msgstr "" msgstr "Агату"
#. name for agd #. name for agd
msgid "Agarabi" msgid "Agarabi"
msgstr "" msgstr "Агараби"
#. name for age #. name for age
msgid "Angal" msgid "Angal"
msgstr "" msgstr "Ангал"
#. name for agf #. name for agf
msgid "Arguni" msgid "Arguni"
msgstr "" msgstr "Аргуни"
#. name for agg #. name for agg
msgid "Angor" msgid "Angor"
msgstr "" msgstr "Ангор"
#. name for agh #. name for agh
msgid "Ngelima" msgid "Ngelima"
msgstr "" msgstr "Нгелима"
#. name for agi #. name for agi
msgid "Agariya" msgid "Agariya"
@ -588,15 +588,15 @@ msgstr ""
#. name for agt #. name for agt
msgid "Agta; Central Cagayan" msgid "Agta; Central Cagayan"
msgstr "" msgstr "Агта;Централно Кагајански"
#. name for agu #. name for agu
msgid "Aguacateco" msgid "Aguacateco"
msgstr "" msgstr "Агвакатеко"
#. name for agv #. name for agv
msgid "Dumagat; Remontado" msgid "Dumagat; Remontado"
msgstr "" msgstr "Думагат;Ремонтадо"
#. name for agw #. name for agw
msgid "Kahua" msgid "Kahua"
@ -604,27 +604,27 @@ msgstr ""
#. name for agx #. name for agx
msgid "Aghul" msgid "Aghul"
msgstr "" msgstr "Агхул"
#. name for agy #. name for agy
msgid "Alta; Southern" msgid "Alta; Southern"
msgstr "" msgstr "Алта;Јужни"
#. name for agz #. name for agz
msgid "Agta; Mt. Iriga" msgid "Agta; Mt. Iriga"
msgstr "" msgstr "Агта;Мт.Ирига"
#. name for aha #. name for aha
msgid "Ahanta" msgid "Ahanta"
msgstr "" msgstr "Аханта"
#. name for ahb #. name for ahb
msgid "Axamb" msgid "Axamb"
msgstr "" msgstr "Аксамб"
#. name for ahg #. name for ahg
msgid "Qimant" msgid "Qimant"
msgstr "" msgstr "Кимант"
#. name for ahh #. name for ahh
msgid "Aghu" msgid "Aghu"
@ -668,95 +668,95 @@ msgstr ""
#. name for aht #. name for aht
msgid "Ahtena" msgid "Ahtena"
msgstr "" msgstr "Ахтена"
#. name for aia #. name for aia
msgid "Arosi" msgid "Arosi"
msgstr "" msgstr "Ароси"
#. name for aib #. name for aib
msgid "Ainu (China)" msgid "Ainu (China)"
msgstr "" msgstr "Аину(Кина)"
#. name for aic #. name for aic
msgid "Ainbai" msgid "Ainbai"
msgstr "" msgstr "Аинбаи"
#. name for aid #. name for aid
msgid "Alngith" msgid "Alngith"
msgstr "" msgstr "Алнгит"
#. name for aie #. name for aie
msgid "Amara" msgid "Amara"
msgstr "" msgstr "Амара"
#. name for aif #. name for aif
msgid "Agi" msgid "Agi"
msgstr "" msgstr "Аги"
#. name for aig #. name for aig
msgid "Creole English; Antigua and Barbuda" msgid "Creole English; Antigua and Barbuda"
msgstr "" msgstr "Креолски Енглески;Антигва и Барбуда"
#. name for aih #. name for aih
msgid "Ai-Cham" msgid "Ai-Cham"
msgstr "" msgstr "Аи-Чам"
#. name for aii #. name for aii
msgid "Neo-Aramaic; Assyrian" msgid "Neo-Aramaic; Assyrian"
msgstr "" msgstr "Ново-Арамејски;Асирски"
#. name for aij #. name for aij
msgid "Lishanid Noshan" msgid "Lishanid Noshan"
msgstr "" msgstr "Лианид Ношан"
#. name for aik #. name for aik
msgid "Ake" msgid "Ake"
msgstr "" msgstr "Аке"
#. name for ail #. name for ail
msgid "Aimele" msgid "Aimele"
msgstr "" msgstr "Ајмеле"
#. name for aim #. name for aim
msgid "Aimol" msgid "Aimol"
msgstr "" msgstr "Ајмол"
#. name for ain #. name for ain
msgid "Ainu (Japan)" msgid "Ainu (Japan)"
msgstr "" msgstr "Аину(Јапан)"
#. name for aio #. name for aio
msgid "Aiton" msgid "Aiton"
msgstr "" msgstr "Аитон"
#. name for aip #. name for aip
msgid "Burumakok" msgid "Burumakok"
msgstr "" msgstr "Бурумакок"
#. name for aiq #. name for aiq
msgid "Aimaq" msgid "Aimaq"
msgstr "" msgstr "Ајмак"
#. name for air #. name for air
msgid "Airoran" msgid "Airoran"
msgstr "" msgstr "Ајроран"
#. name for ais #. name for ais
msgid "Amis; Nataoran" msgid "Amis; Nataoran"
msgstr "" msgstr "Амис;Натаоран"
#. name for ait #. name for ait
msgid "Arikem" msgid "Arikem"
msgstr "" msgstr "Арикем"
#. name for aiw #. name for aiw
msgid "Aari" msgid "Aari"
msgstr "" msgstr "Аари"
#. name for aix #. name for aix
msgid "Aighon" msgid "Aighon"
msgstr "" msgstr "Аигхон"
#. name for aiy #. name for aiy
msgid "Ali" msgid "Ali"
@ -764,35 +764,35 @@ msgstr ""
#. name for aja #. name for aja
msgid "Aja (Sudan)" msgid "Aja (Sudan)"
msgstr "" msgstr "Аја(Судан)"
#. name for ajg #. name for ajg
msgid "Aja (Benin)" msgid "Aja (Benin)"
msgstr "" msgstr "Аја(Бенин)"
#. name for aji #. name for aji
msgid "Ajië" msgid "Ajië"
msgstr "" msgstr "Ајие"
#. name for ajp #. name for ajp
msgid "Arabic; South Levantine" msgid "Arabic; South Levantine"
msgstr "" msgstr "Арапски;Јужно-Левантински"
#. name for ajt #. name for ajt
msgid "Arabic; Judeo-Tunisian" msgid "Arabic; Judeo-Tunisian"
msgstr "" msgstr "Арапски;Јудео-Туниски"
#. name for aju #. name for aju
msgid "Arabic; Judeo-Moroccan" msgid "Arabic; Judeo-Moroccan"
msgstr "" msgstr "Арапски;Јудео-Марокански"
#. name for ajw #. name for ajw
msgid "Ajawa" msgid "Ajawa"
msgstr "" msgstr "Ајава"
#. name for ajz #. name for ajz
msgid "Karbi; Amri" msgid "Karbi; Amri"
msgstr "" msgstr "Карби;Амри"
#. name for aka #. name for aka
msgid "Akan" msgid "Akan"
@ -800,35 +800,35 @@ msgstr "акан"
#. name for akb #. name for akb
msgid "Batak Angkola" msgid "Batak Angkola"
msgstr "" msgstr "Батак Ангкола"
#. name for akc #. name for akc
msgid "Mpur" msgid "Mpur"
msgstr "" msgstr "Мпур"
#. name for akd #. name for akd
msgid "Ukpet-Ehom" msgid "Ukpet-Ehom"
msgstr "" msgstr "Укпет-Ехом"
#. name for ake #. name for ake
msgid "Akawaio" msgid "Akawaio"
msgstr "" msgstr "Акавајо"
#. name for akf #. name for akf
msgid "Akpa" msgid "Akpa"
msgstr "" msgstr "Акипа"
#. name for akg #. name for akg
msgid "Anakalangu" msgid "Anakalangu"
msgstr "" msgstr "Анакалангу"
#. name for akh #. name for akh
msgid "Angal Heneng" msgid "Angal Heneng"
msgstr "" msgstr "Ангал Хененг"
#. name for aki #. name for aki
msgid "Aiome" msgid "Aiome"
msgstr "" msgstr "Ајоме"
#. name for akj #. name for akj
msgid "Aka-Jeru" msgid "Aka-Jeru"

View File

@ -151,7 +151,8 @@ class Translations(POT): # {{{
self.info('\tCopying ISO 639 translations') self.info('\tCopying ISO 639 translations')
subprocess.check_call(['msgfmt', '-o', dest, iso639]) subprocess.check_call(['msgfmt', '-o', dest, iso639])
elif locale not in ('en_GB', 'en_CA', 'en_AU', 'si', 'ur', 'sc', elif locale not in ('en_GB', 'en_CA', 'en_AU', 'si', 'ur', 'sc',
'ltg', 'nds', 'te', 'yi', 'fo', 'sq', 'ast', 'ml', 'ku'): 'ltg', 'nds', 'te', 'yi', 'fo', 'sq', 'ast', 'ml', 'ku',
'fr_CA'):
self.warn('No ISO 639 translations for locale:', locale) self.warn('No ISO 639 translations for locale:', locale)
self.write_stats() self.write_stats()

View File

@ -132,12 +132,15 @@ class UploadInstallers(Command): # {{{
with open(os.path.join(tdir, 'fmap'), 'wb') as fo: with open(os.path.join(tdir, 'fmap'), 'wb') as fo:
for f, desc in files.iteritems(): for f, desc in files.iteritems():
fo.write('%s: %s\n'%(f, desc)) fo.write('%s: %s\n'%(f, desc))
try:
send_data(tdir) while True:
except: try:
print('\nUpload to staging failed, retrying in a minute') send_data(tdir)
time.sleep(60) except:
send_data(tdir) print('\nUpload to staging failed, retrying in a minute')
time.sleep(60)
else:
break
def upload_to_google(self, replace): def upload_to_google(self, replace):
gdata = get_google_data() gdata = get_google_data()

View File

@ -419,7 +419,7 @@ class CurrentDir(object):
self.cwd = None self.cwd = None
def __enter__(self, *args): def __enter__(self, *args):
self.cwd = os.getcwd() self.cwd = os.getcwdu()
os.chdir(self.path) os.chdir(self.path)
return self.cwd return self.cwd

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en' __docformat__ = 'restructuredtext en'
__appname__ = u'calibre' __appname__ = u'calibre'
numeric_version = (0, 8, 40) numeric_version = (0, 8, 43)
__version__ = u'.'.join(map(unicode, numeric_version)) __version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>" __author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
@ -190,3 +190,14 @@ def get_windows_username():
return buf.value return buf.value
return get_unicode_windows_env_var(u'USERNAME') return get_unicode_windows_env_var(u'USERNAME')
def get_windows_temp_path():
import ctypes
n = ctypes.windll.kernel32.GetTempPathW(0, None)
if n == 0:
return None
buf = ctypes.create_unicode_buffer(u'\0'*n)
ctypes.windll.kernel32.GetTempPathW(n, buf)
ans = buf.value
return ans if ans else None

View File

@ -263,7 +263,7 @@ class MOBIMetadataReader(MetadataReaderPlugin):
description = _('Read metadata from %s files')%'MOBI' description = _('Read metadata from %s files')%'MOBI'
def get_metadata(self, stream, ftype): def get_metadata(self, stream, ftype):
from calibre.ebooks.mobi.reader import get_metadata from calibre.ebooks.metadata.mobi import get_metadata
return get_metadata(stream) return get_metadata(stream)
class ODTMetadataReader(MetadataReaderPlugin): class ODTMetadataReader(MetadataReaderPlugin):
@ -284,7 +284,7 @@ class OPFMetadataReader(MetadataReaderPlugin):
def get_metadata(self, stream, ftype): def get_metadata(self, stream, ftype):
from calibre.ebooks.metadata.opf2 import OPF from calibre.ebooks.metadata.opf2 import OPF
return OPF(stream, os.getcwd()).to_book_metadata() return OPF(stream, os.getcwdu()).to_book_metadata()
class PDBMetadataReader(MetadataReaderPlugin): class PDBMetadataReader(MetadataReaderPlugin):

View File

@ -192,9 +192,13 @@ class InputFormatPlugin(Plugin):
def __call__(self, stream, options, file_ext, log, def __call__(self, stream, options, file_ext, log,
accelerators, output_dir): accelerators, output_dir):
log('InputFormatPlugin: %s running'%self.name) try:
if hasattr(stream, 'name'): log('InputFormatPlugin: %s running'%self.name)
log('on', stream.name) if hasattr(stream, 'name'):
log('on', stream.name)
except:
# In case stdout is broken
pass
with CurrentDir(output_dir): with CurrentDir(output_dir):
for x in os.listdir('.'): for x in os.listdir('.'):

View File

@ -379,6 +379,7 @@ class iPadOutput(OutputProfile):
/* Feed summary formatting */ /* Feed summary formatting */
.article_summary { .article_summary {
display:inline-block; display:inline-block;
padding-bottom:0.5em;
} }
.feed { .feed {
font-family:sans-serif; font-family:sans-serif;
@ -431,6 +432,15 @@ class iPadOutput(OutputProfile):
''' '''
# }}} # }}}
class iPad3Output(iPadOutput):
screen_size = comic_screen_size = (2048, 1536)
dpi = 264.0
name = 'iPad 3'
short_name = 'ipad3'
description = _('Intended for the iPad 3 and similar devices with a '
'resolution of 1536x2048')
class TabletOutput(iPadOutput): class TabletOutput(iPadOutput):
name = 'Tablet' name = 'Tablet'
short_name = 'tablet' short_name = 'tablet'
@ -754,7 +764,7 @@ class PocketBook900Output(OutputProfile):
output_profiles = [OutputProfile, SonyReaderOutput, SonyReader300Output, output_profiles = [OutputProfile, SonyReaderOutput, SonyReader300Output,
SonyReader900Output, MSReaderOutput, MobipocketOutput, HanlinV3Output, SonyReader900Output, MSReaderOutput, MobipocketOutput, HanlinV3Output,
HanlinV5Output, CybookG3Output, CybookOpusOutput, KindleOutput, HanlinV5Output, CybookG3Output, CybookOpusOutput, KindleOutput,
iPadOutput, KoboReaderOutput, TabletOutput, SamsungGalaxy, iPadOutput, iPad3Output, KoboReaderOutput, TabletOutput, SamsungGalaxy,
SonyReaderLandscapeOutput, KindleDXOutput, IlliadOutput, SonyReaderLandscapeOutput, KindleDXOutput, IlliadOutput,
IRexDR1000Output, IRexDR800Output, JetBook5Output, NookOutput, IRexDR1000Output, IRexDR800Output, JetBook5Output, NookOutput,
BambookOutput, NookColorOutput, PocketBook900Output, GenericEink, BambookOutput, NookColorOutput, PocketBook900Output, GenericEink,

View File

@ -51,8 +51,9 @@ Run an embedded python interpreter.
'with sqlite3 works.') 'with sqlite3 works.')
parser.add_option('-p', '--py-console', help='Run python console', parser.add_option('-p', '--py-console', help='Run python console',
default=False, action='store_true') default=False, action='store_true')
parser.add_option('-m', '--inspect-mobi', parser.add_option('-m', '--inspect-mobi', action='store_true',
help='Inspect the MOBI file at the specified path', default=None) default=False,
help='Inspect the MOBI file(s) at the specified path(s)')
parser.add_option('--test-build', help='Test binary modules in build', parser.add_option('--test-build', help='Test binary modules in build',
action='store_true', default=False) action='store_true', default=False)
@ -137,7 +138,7 @@ def add_simple_plugin(path_to_plugin):
tdir = tempfile.mkdtemp() tdir = tempfile.mkdtemp()
open(os.path.join(tdir, 'custom_plugin.py'), open(os.path.join(tdir, 'custom_plugin.py'),
'wb').write(open(path_to_plugin, 'rb').read()) 'wb').write(open(path_to_plugin, 'rb').read())
odir = os.getcwd() odir = os.getcwdu()
os.chdir(tdir) os.chdir(tdir)
zf = zipfile.ZipFile('plugin.zip', 'w') zf = zipfile.ZipFile('plugin.zip', 'w')
zf.write('custom_plugin.py') zf.write('custom_plugin.py')
@ -232,9 +233,13 @@ def main(args=sys.argv):
if len(args) > 1 and os.access(args[-1], os.R_OK): if len(args) > 1 and os.access(args[-1], os.R_OK):
sql_dump = args[-1] sql_dump = args[-1]
reinit_db(opts.reinitialize_db, sql_dump=sql_dump) reinit_db(opts.reinitialize_db, sql_dump=sql_dump)
elif opts.inspect_mobi is not None: elif opts.inspect_mobi:
from calibre.ebooks.mobi.debug import inspect_mobi from calibre.ebooks.mobi.debug import inspect_mobi
inspect_mobi(opts.inspect_mobi) for path in args[1:]:
prints('Inspecting:', path)
inspect_mobi(path)
print
elif opts.test_build: elif opts.test_build:
from calibre.test_build import test from calibre.test_build import test
test() test()

View File

@ -68,6 +68,7 @@ class ANDROID(USBMS):
# Sony Ericsson # Sony Ericsson
0xfce : { 0xfce : {
0xd12e : [0x0100], 0xd12e : [0x0100],
0xe15d : [0x226],
0xe14f : [0x0226], 0xe14f : [0x0226],
0x614f : [0x0226, 0x100], 0x614f : [0x0226, 0x100],
0x6156 : [0x0226, 0x100], 0x6156 : [0x0226, 0x100],
@ -80,16 +81,17 @@ class ANDROID(USBMS):
0x4e11 : [0x0100, 0x226, 0x227], 0x4e11 : [0x0100, 0x226, 0x227],
0x4e12 : [0x0100, 0x226, 0x227], 0x4e12 : [0x0100, 0x226, 0x227],
0x4e21 : [0x0100, 0x226, 0x227, 0x231], 0x4e21 : [0x0100, 0x226, 0x227, 0x231],
0x4e22 : [0x0100, 0x226, 0x227], 0x4e22 : [0x0100, 0x226, 0x227, 0x231],
0xb058 : [0x0222, 0x226, 0x227], 0xb058 : [0x0222, 0x226, 0x227],
0x0ff9 : [0x0226], 0x0ff9 : [0x0226],
0xdddd : [0x216],
}, },
# Samsung # Samsung
0x04e8 : { 0x681d : [0x0222, 0x0223, 0x0224, 0x0400], 0x04e8 : { 0x681d : [0x0222, 0x0223, 0x0224, 0x0400],
0x681c : [0x0222, 0x0223, 0x0224, 0x0400], 0x681c : [0x0222, 0x0223, 0x0224, 0x0400],
0x6640 : [0x0100], 0x6640 : [0x0100],
0x685b : [0x0400], 0x685b : [0x0400, 0x0226],
0x685e : [0x0400], 0x685e : [0x0400],
0x6860 : [0x0400], 0x6860 : [0x0400],
0x6877 : [0x0400], 0x6877 : [0x0400],
@ -170,7 +172,7 @@ class ANDROID(USBMS):
'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA', 'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA',
'GENERIC-', 'ZTE', 'MID', 'QUALCOMM', 'PANDIGIT', 'HYSTON', 'GENERIC-', 'ZTE', 'MID', 'QUALCOMM', 'PANDIGIT', 'HYSTON',
'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP', 'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP',
'POCKET', 'ONDA_MID'] 'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC']
WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE', WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
'__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897', '__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID',
@ -184,14 +186,16 @@ class ANDROID(USBMS):
'ALPANDIGITAL', 'ANDROID_MID', 'VTAB1008', 'EMX51_BBG_ANDROI', 'ALPANDIGITAL', 'ANDROID_MID', 'VTAB1008', 'EMX51_BBG_ANDROI',
'UMS', '.K080', 'P990', 'LTE', 'MB853', 'GT-S5660_CARD', 'A107', 'UMS', '.K080', 'P990', 'LTE', 'MB853', 'GT-S5660_CARD', 'A107',
'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET', 'RK29_SDK', 'MB855', 'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET', 'RK29_SDK', 'MB855',
'XT910', 'BOOK_A10', 'USB_2.0_DRIVER'] 'XT910', 'BOOK_A10', 'USB_2.0_DRIVER', 'I9100T', 'P999DW',
'KTABLET_PC', 'INGENIC']
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897', WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD', 'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD', 'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
'__UMS_COMPOSITE', 'SGH-I997_CARD', 'MB870', 'ALPANDIGITAL', '__UMS_COMPOSITE', 'SGH-I997_CARD', 'MB870', 'ALPANDIGITAL',
'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853', 'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853',
'A1-07___C0541A4F', 'XT912', 'MB855', 'XT910', 'BOOK_A10_CARD', 'A1-07___C0541A4F', 'XT912', 'MB855', 'XT910', 'BOOK_A10_CARD',
'USB_2.0_DRIVER'] 'USB_2.0_DRIVER', 'I9100T', 'P999DW_SD_CARD', 'KTABLET_PC',
'FILE-CD_GADGET']
OSX_MAIN_MEM = 'Android Device Main Memory' OSX_MAIN_MEM = 'Android Device Main Memory'

View File

@ -103,17 +103,6 @@ class AppleOpenFeedback(OpenFeedback):
if isosx:
try:
import appscript
appscript
except:
# appscript fails to load on 10.4
appscript = None
if iswindows:
import pythoncom, win32com.client
class DriverBase(DeviceConfig, DevicePlugin): class DriverBase(DeviceConfig, DevicePlugin):
# Needed for config_widget to work # Needed for config_widget to work
FORMATS = ['epub', 'pdf'] FORMATS = ['epub', 'pdf']
@ -467,6 +456,7 @@ class ITUNES(DriverBase):
self._purge_orphans(library_books, cached_books) self._purge_orphans(library_books, cached_books)
elif iswindows: elif iswindows:
import pythoncom, win32com.client
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
self.iTunes = win32com.client.Dispatch("iTunes.Application") self.iTunes = win32com.client.Dispatch("iTunes.Application")
@ -533,6 +523,11 @@ class ITUNES(DriverBase):
instantiate iTunes if necessary instantiate iTunes if necessary
This gets called ~1x/second while device fingerprint is sensed This gets called ~1x/second while device fingerprint is sensed
''' '''
try:
import appscript
appscript
except:
appscript = None
if appscript is None: if appscript is None:
return False return False
@ -599,6 +594,8 @@ class ITUNES(DriverBase):
iPad, as we have to return True if we can handle device interaction, or False if not. iPad, as we have to return True if we can handle device interaction, or False if not.
''' '''
import pythoncom
if self.iTunes: if self.iTunes:
# We've previously run, so the user probably ejected the device # We've previously run, so the user probably ejected the device
try: try:
@ -709,6 +706,7 @@ class ITUNES(DriverBase):
if self.manual_sync_mode: if self.manual_sync_mode:
self._remove_from_device(self.cached_books[path]) self._remove_from_device(self.cached_books[path])
elif iswindows: elif iswindows:
import pythoncom, win32com.client
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
self.iTunes = win32com.client.Dispatch("iTunes.Application") self.iTunes = win32com.client.Dispatch("iTunes.Application")
@ -754,6 +752,8 @@ class ITUNES(DriverBase):
self.iTunes.eject(self.sources['iPod']) self.iTunes.eject(self.sources['iPod'])
elif iswindows: elif iswindows:
if 'iPod' in self.sources: if 'iPod' in self.sources:
import pythoncom, win32com.client
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
self.iTunes = win32com.client.Dispatch("iTunes.Application") self.iTunes = win32com.client.Dispatch("iTunes.Application")
@ -788,6 +788,7 @@ class ITUNES(DriverBase):
elif iswindows: elif iswindows:
if 'iPod' in self.sources: if 'iPod' in self.sources:
import pythoncom, win32com.client
while True: while True:
try: try:
@ -1098,6 +1099,8 @@ class ITUNES(DriverBase):
_('%(num)d of %(tot)d') % dict(num=i+1, tot=file_count)) _('%(num)d of %(tot)d') % dict(num=i+1, tot=file_count))
elif iswindows: elif iswindows:
import pythoncom, win32com.client
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
self.iTunes = win32com.client.Dispatch("iTunes.Application") self.iTunes = win32com.client.Dispatch("iTunes.Application")
@ -1163,6 +1166,7 @@ class ITUNES(DriverBase):
''' '''
logger().info(" ITUNES._add_device_book()") logger().info(" ITUNES._add_device_book()")
if isosx: if isosx:
import appscript
if 'iPod' in self.sources: if 'iPod' in self.sources:
connected_device = self.sources['iPod'] connected_device = self.sources['iPod']
device = self.iTunes.sources[connected_device] device = self.iTunes.sources[connected_device]
@ -1257,6 +1261,7 @@ class ITUNES(DriverBase):
if DEBUG: if DEBUG:
logger().info(" ITUNES._add_library_book()") logger().info(" ITUNES._add_library_book()")
if isosx: if isosx:
import appscript
added = self.iTunes.add(appscript.mactypes.File(file)) added = self.iTunes.add(appscript.mactypes.File(file))
elif iswindows: elif iswindows:
@ -1541,6 +1546,7 @@ class ITUNES(DriverBase):
if wait: if wait:
time.sleep(wait) time.sleep(wait)
if isosx: if isosx:
import appscript
connected_device = self.sources['iPod'] connected_device = self.sources['iPod']
dev_books = None dev_books = None
device = self.iTunes.sources[connected_device] device = self.iTunes.sources[connected_device]
@ -2077,6 +2083,7 @@ class ITUNES(DriverBase):
device_books = [] device_books = []
if isosx: if isosx:
import appscript
if 'iPod' in self.sources: if 'iPod' in self.sources:
connected_device = self.sources['iPod'] connected_device = self.sources['iPod']
device = self.iTunes.sources[connected_device] device = self.iTunes.sources[connected_device]
@ -2104,6 +2111,8 @@ class ITUNES(DriverBase):
logger().info() logger().info()
elif iswindows: elif iswindows:
import pythoncom
if 'iPod' in self.sources: if 'iPod' in self.sources:
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
@ -2171,6 +2180,7 @@ class ITUNES(DriverBase):
lib = None lib = None
if isosx: if isosx:
import appscript
for source in self.iTunes.sources(): for source in self.iTunes.sources():
if source.kind() == appscript.k.library: if source.kind() == appscript.k.library:
lib = source lib = source
@ -2341,6 +2351,7 @@ class ITUNES(DriverBase):
logger().info(" ITUNES:_launch_iTunes():\n Instantiating iTunes") logger().info(" ITUNES:_launch_iTunes():\n Instantiating iTunes")
if isosx: if isosx:
import appscript
''' '''
Launch iTunes if not already running Launch iTunes if not already running
''' '''
@ -2382,6 +2393,8 @@ class ITUNES(DriverBase):
logger().info(" calibre_library_path: %s" % self.calibre_library_path) logger().info(" calibre_library_path: %s" % self.calibre_library_path)
if iswindows: if iswindows:
import win32com.client
''' '''
Launch iTunes if not already running Launch iTunes if not already running
Assumes pythoncom wrapper Assumes pythoncom wrapper
@ -2752,6 +2765,8 @@ class ITUNES(DriverBase):
time.sleep(2) time.sleep(2)
print print
elif iswindows: elif iswindows:
import pythoncom, win32com.client
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
self.iTunes = win32com.client.Dispatch("iTunes.Application") self.iTunes = win32com.client.Dispatch("iTunes.Application")
@ -3088,6 +3103,12 @@ class ITUNES_ASYNC(ITUNES):
if DEBUG: if DEBUG:
logger().info("ITUNES_ASYNC:__init__()") logger().info("ITUNES_ASYNC:__init__()")
try:
import appscript
appscript
except:
appscript = None
if isosx and appscript is None: if isosx and appscript is None:
self.connected = False self.connected = False
raise UserFeedback('OSX 10.5 or later required', details=None, level=UserFeedback.WARN) raise UserFeedback('OSX 10.5 or later required', details=None, level=UserFeedback.WARN)
@ -3099,6 +3120,8 @@ class ITUNES_ASYNC(ITUNES):
self._launch_iTunes() self._launch_iTunes()
if iswindows: if iswindows:
import pythoncom
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
self._launch_iTunes() self._launch_iTunes()
@ -3180,6 +3203,8 @@ class ITUNES_ASYNC(ITUNES):
_('%(num)d of %(tot)d') % dict(num=i+1, tot=book_count)) _('%(num)d of %(tot)d') % dict(num=i+1, tot=book_count))
elif iswindows: elif iswindows:
import pythoncom, win32com.client
try: try:
pythoncom.CoInitialize() pythoncom.CoInitialize()
self.iTunes = win32com.client.Dispatch("iTunes.Application") self.iTunes = win32com.client.Dispatch("iTunes.Application")

View File

@ -10,7 +10,7 @@ Generates and writes an APNX page mapping file.
import struct import struct
from calibre.ebooks.mobi.reader import MobiReader from calibre.ebooks.mobi.reader.mobi6 import MobiReader
from calibre.ebooks.pdb.header import PdbHeaderReader from calibre.ebooks.pdb.header import PdbHeaderReader
from calibre.utils.logging import default_log from calibre.utils.logging import default_log

View File

@ -31,7 +31,7 @@ BOOK_EXTENSIONS = ['lrf', 'rar', 'zip', 'rtf', 'lit', 'txt', 'txtz', 'text', 'ht
'epub', 'fb2', 'djv', 'djvu', 'lrx', 'cbr', 'cbz', 'cbc', 'oebzip', 'epub', 'fb2', 'djv', 'djvu', 'lrx', 'cbr', 'cbz', 'cbc', 'oebzip',
'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'pmlz', 'mbp', 'tan', 'snb', 'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'pmlz', 'mbp', 'tan', 'snb',
'xps', 'oxps', 'azw4', 'book', 'zbf', 'pobi', 'docx', 'md', 'xps', 'oxps', 'azw4', 'book', 'zbf', 'pobi', 'docx', 'md',
'textile', 'markdown'] 'textile', 'markdown', 'ibook', 'iba']
class HTMLRenderer(object): class HTMLRenderer(object):
@ -215,7 +215,11 @@ def unit_convert(value, base, font, dpi):
def generate_masthead(title, output_path=None, width=600, height=60): def generate_masthead(title, output_path=None, width=600, height=60):
from calibre.ebooks.conversion.config import load_defaults from calibre.ebooks.conversion.config import load_defaults
from calibre.utils.fonts import fontconfig from calibre.utils.fonts import fontconfig
font_path = default_font = P('fonts/liberation/LiberationSerif-Bold.ttf') from calibre.utils.config import tweaks
fp = tweaks['generate_cover_title_font']
if not fp:
fp = P('fonts/liberation/LiberationSerif-Bold.ttf')
font_path = default_font = fp
recs = load_defaults('mobi_output') recs = load_defaults('mobi_output')
masthead_font_family = recs.get('masthead_font', 'Default') masthead_font_family = recs.get('masthead_font', 'Default')

View File

@ -22,6 +22,6 @@ class AZW4Input(InputFormatPlugin):
header = PdbHeaderReader(stream) header = PdbHeaderReader(stream)
reader = Reader(header, stream, log, options) reader = Reader(header, stream, log, options)
opf = reader.extract_content(os.getcwd()) opf = reader.extract_content(os.getcwdu())
return opf return opf

View File

@ -173,7 +173,7 @@ class ComicInput(InputFormatPlugin):
comics = [] comics = []
for i, x in enumerate(comics_): for i, x in enumerate(comics_):
title, fname = x title, fname = x
cdir = 'comic_%d'%(i+1) if len(comics_) > 1 else '.' cdir = u'comic_%d'%(i+1) if len(comics_) > 1 else u'.'
cdir = os.path.abspath(cdir) cdir = os.path.abspath(cdir)
if not os.path.exists(cdir): if not os.path.exists(cdir):
os.makedirs(cdir) os.makedirs(cdir)
@ -187,7 +187,7 @@ class ComicInput(InputFormatPlugin):
mi = MetaInformation(os.path.basename(stream.name).rpartition('.')[0], mi = MetaInformation(os.path.basename(stream.name).rpartition('.')[0],
[_('Unknown')]) [_('Unknown')])
opf = OPFCreator(os.path.abspath('.'), mi) opf = OPFCreator(os.getcwdu(), mi)
entries = [] entries = []
def href(x): def href(x):
@ -225,9 +225,9 @@ class ComicInput(InputFormatPlugin):
_('Page')+' %d'%(i+1), play_order=po) _('Page')+' %d'%(i+1), play_order=po)
po += 1 po += 1
opf.set_toc(toc) opf.set_toc(toc)
m, n = open('metadata.opf', 'wb'), open('toc.ncx', 'wb') m, n = open(u'metadata.opf', 'wb'), open('toc.ncx', 'wb')
opf.render(m, n, 'toc.ncx') opf.render(m, n, u'toc.ncx')
return os.path.abspath('metadata.opf') return os.path.abspath(u'metadata.opf')
def create_wrappers(self, pages): def create_wrappers(self, pages):
from calibre.ebooks.oeb.base import XHTML_NS from calibre.ebooks.oeb.base import XHTML_NS
@ -252,7 +252,7 @@ class ComicInput(InputFormatPlugin):
dir = os.path.dirname(pages[0]) dir = os.path.dirname(pages[0])
for i, page in enumerate(pages): for i, page in enumerate(pages):
wrapper = WRAPPER%(XHTML_NS, i+1, os.path.basename(page), i+1) wrapper = WRAPPER%(XHTML_NS, i+1, os.path.basename(page), i+1)
page = os.path.join(dir, 'page_%d.xhtml'%(i+1)) page = os.path.join(dir, u'page_%d.xhtml'%(i+1))
open(page, 'wb').write(wrapper) open(page, 'wb').write(wrapper)
wrappers.append(page) wrappers.append(page)
return wrappers return wrappers

View File

@ -46,7 +46,7 @@ class DJVUInput(InputFormatPlugin):
except: except:
stream.seek(0) # retry with the pure python converter stream.seek(0) # retry with the pure python converter
if ppdjvu: if ppdjvu:
from .djvu import DJVUFile from calibre.ebooks.djvu.djvu import DJVUFile
x = DJVUFile(stream) x = DJVUFile(stream)
x.get_text(stdout) x.get_text(stdout)

Some files were not shown because too many files have changed in this diff Show More