Merge from trunk.

This commit is contained in:
Starson17 2010-12-30 13:45:52 -05:00
commit edcbb70a3a
280 changed files with 292509 additions and 143889 deletions

View File

@ -4,6 +4,11 @@ License: GPL-3
The full text of the GPL is distributed as in
/usr/share/common-licenses/GPL-3 on Debian systems.
Files: src/calibre/ebooks/pdf/*.h,*.cpp
License: GPL-2 or later
The full text of the GPL is distributed as in
/usr/share/common-licenses/GPL-2 on Debian systems.
Files: src/calibre/ebooks/BeautifulSoup.py
Copyright: Copyright (c) 2004-2007, Leonard Richardson
License: BSD

View File

@ -4,6 +4,221 @@
# for important features/bug fixes.
# Also, each release can have new and improved recipes.
- version: 0.7.35
date: 2010-12-23
new features:
- title: "Add a simple to use Rich text editor for comments to the edit metadata dialog."
description: >
"You can now easily add formatting like bold/italic/lists/headings/colors/etc. to book comments via the
edit metadata dialog"
type: major
- title: "E-book viewer: Add a right click menu item 'Inspect' that allows you to inspect the underlying HTML/CSS source of the currently displayed content"
type: major
- title: "When deleting books from the library if a device is connected and the books are also present on the device ask the user if the books should be deleted from the device, the library, or both."
- title: "Add device drivers for Trekstore eBook Player 7, Sanda Bambook, ALuratek Color, Samsung Galaxy, LG Optimus, Motorola Droid 2 and Sunstech EB700"
tickets: [8021, 7966, 7973, 7956]
- title: "Add an entry to the menu of the calibre library button to select a random book from your calibre library"
tickets: [8010]
- title: "SONY driver: Add a couple of special extra collections for all books by author and all books by title, to workaround the broken sorting on newer SONY models. To enable these collections, go to Preferences->Plugins->Device Interface plugins and customize the SONY plugin."
- title: "Edit metadata dialog: When downloading metadata, make the table of matching books sortable"
tickets: [7951]
- title: "Add a success message after a database integrity check completes successfully"
- title: "Search and replace: When using regular expression mode, add a special input field '{template}' that allows use of the templating language to create complex input fields. Also allow setting of series_index by search and replace using the same syntax as in the book list, namely, Series Name [series number]"
- title: "Bulk metadata edit: Add option to automatically set cover from the cover present in the actual ebook files"
tickets: [7947]
- title: "E-book viewer: Show format of current book in the title bar."
tickets: [7974]
- title: "Add a tweak to control how author names are displayed in the Tag Browser and Content Server"
- title: "FB2 Output: Restore sectionizing functionality"
bug fixes:
- title: "When in narrow layout, reserve 40% of available width in the book details panel for series/formats/etc and use the rest for comments"
tickets: [8028]
- title: "PDB Input: Fix failure to block-indent PML \t sections"
tickets: [8019]
- title: "Tag browser: When renaming items don't reset the library view and try not to scroll the Tag Browser itself"
- title: "Conversion pipeline: Fix broken link rewriting for inline CSS embedded in HTML"
- title: "Fix regression in 0.7.34 that broke recipes using extra_css to link to SONY device fonts"
tickets: [7995]
- title: "SONY driver: Don't upload thumbnails as they slow down post disconnect processing on older models"
- title: "Content server: Fix a bug that allowed remote users to read arbitrary png/gif/js/css/html files"
tickets: [7980]
- title: "On X11 initialize fontconfig in the GUI thread as Qt also uses fontconfig internally and fontconfig is not thread safe. Fixes a few random crashes on calibre startup"
- title: "When using the remove specific format actions, only show available formats in the selected books"
tickets: [7967]
- title: "Linux binary build: If setting system default locale fails, try setting locale to en_US.UTF-8 instead"
- title: "Have the title sort tweak respected everywhere"
- title: "PocketBook 701 driver: Swap the main memory and card drives on windows"
- title: "Fix regression in templating that caused series_index to be shown even when book had no series"
tickets: [7949]
- title: "Content server: Fix regression that broke browsing by rating"
- title: "Content server OPDS feeds: Fix parsing of author names as XML"
tickets: [7938]
improved recipes:
- Business Week Magazine
- Gazet van Antwerpen
- La Nacion
- New England Journal of Medicine
- Journal of Hospital Medicine
new recipes:
- title: "NRC Handelsblad (EPUB version)"
author: "veezh"
- title: "CND and wenxuecity - znjy"
author: "Derek Liang"
- title: "Mish's Global Economic Trend Analysis"
author: "Darko Miletic"
- version: 0.7.34
date: 2010-12-17
new features:
- title: "Page turn animations in the e-book viewer"
type: major
description: >
"Now when you use the Page Down/Page Up keys or the next/previous page buttons in the viewer, page turning will be animated. The duration of the animation can be controlled in the viewer preferences. Setting it to 0 disables the animation completely."
- title: "Conversion pipeline: Add an option to set the minimum line height of all elements as a percentage of the computed font size. By default, calibre now sets the line height to 120% of the computed font size."
- title: "Large speedup in startup times and post metadata edit wait for large libraries"
- title: "Allow changing the font used in the calibre interface via Preferences->Look and feel"
- title: "Allow editing of the title sort value for a book via the edit metadata dialog"
- title: "Disable the cover cache. This means that if you are running calibre on an underpowered machine, you might notice some slow down in the cover browser. On the other hand, calibre's memory consumption is reduced."
- title: "You can now restart calibre in debug mode by clicking the arrow next to the Preferences button. In debug mode, after you quit calibre, a diagnostic log will popup"
tickets: [7359]
- title: "When creating a new calibre library add an option to copy the custom column, saved searches, etc from the current library."
tickets: [7643]
- title: "Add more tweaks to control how the next available series number is calculated."
tickets: [7892]
- title: "Add a tweak to control layout of the custom metadata tab in the edit metadata dialog"
- title: "Apple driver: Set series number as track number on windows when sending books to iTunes"
- title: "Drivers for PocketBook 701 and Samsung E65"
- title: "E-book viewer: Add option to have the mouse wheel flip pages"
- title: "Add a load_resources method to the InterfaceAction and Plugin classes to facilitate loading of resources from plugin ZIP files"
- title: "E-book viewer: Add option to not remember position in book when quitting."
tickets: [7699]
- title: "When sorting the book list, keep the current book visible after the sort completes."
tickets: [7504]
- title: "EPUB Output: Add an option to flatten the EPUB file structure, specially for FBReaderJ."
tickets: [7788]
- title: "EPUB Output: Ensure all files inside the generated EPUB have unique filenames, to support broken EPUB readers like Stanza, Aldiko, FBReader and Sigil"
- title: "FB2 Output: Add support for some 2.1 style tags."
- title: "Bulk metadata edit: Add options to delete cover/generate default cover."
tickets: [7885]
- title: "Fix a regression in 0.7.33 that broke updating covers in ebook files when saving to disk."
tickets: [7886]
- title: "Don't refresh the Tag browser if it is hidden. Speeds up metadata editing with large libraries, if you hide the Tag Browser."
- title: "MOBI Output: Add option to ignore margins in input document"
tickets: [7877]
- title: "Kobo driver: Add support for 1.8.x firmware"
bug fixes:
- title: "Fix various memory leaks introduced in the last couple of releases"
- title: "EPUB metadata: When rendering first page as the cover, handle embedded svg correctly."
tickets: [7909]
- title: "Disable multiple library support when the CALIBRE_OVERRIDE_DATABASE_PATH env var is set"
- title: "Content server: Fix bug that could cause saved search based restrictions to not exclude all books"
tickets: [7876]
- title: "Topaz metadata: Read metadata correctly from Topaz files that have MOBI file extensions"
- title: "MOBI Input: Handle the (rare) MOBI files that do not specify per paragraph text indents correctly."
tickets: [7869]
- title: "MOBI metadata reader: Handle invalid PRC files with spurious image_offset headers"
- title: "Fix drag/drop of new cover to book detail panel does not update cover browser"
tickets: [7890]
- title: "Do not open the book details dialog when double click on the scrollbars in the book details panel"
tickets: [7826]
- title: "Templates: Fix {tags} not working when no tags are present"
tickets: [7888]
- title: "HTML metadata: Fix regression that broke parsing of some meta tags"
tickets: [7851]
- title: "Preferences: Add tooltips to buddy labels as well."
tickets: [7873]
- title: "Content server: Fix handling of root URL when using --url-prefix"
- title: "Ensure that the default encoding used by python is never ASCII (needed when running a non frozen version of calibre on linux)"
improved recipes:
- Astronomy Picture of the day
- New Scientist
- Radikal
- Times of India
- Economic Times
- Zeit Online
- Dilbert
new recipes:
- title: "Various Japanese news sources, National Geographic and paper.li"
author: "Hiroshi Miura"
- title: "Science based medicine"
author: "BuzzKill"
- title: "Kompiutierra"
author: "Vadim Dyadkin"
- version: 0.7.33
date: 2010-12-10

View File

Before

Width:  |  Height:  |  Size: 133 KiB

After

Width:  |  Height:  |  Size: 133 KiB

831
imgsrc/edit-cut.svg Normal file
View File

@ -0,0 +1,831 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://web.resource.org/cc/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="128"
height="128"
id="svg2"
sodipodi:version="0.32"
inkscape:version="0.45.1"
version="1.0"
sodipodi:docbase="/home/david/Oxygen/trunk/scalable/actions"
sodipodi:docname="edit-cut.svgz"
inkscape:output_extension="org.inkscape.output.svgz.inkscape"
inkscape:export-filename="edit-cut.png"
inkscape:export-xdpi="22.5"
inkscape:export-ydpi="22.5">
<defs
id="defs4">
<linearGradient
inkscape:collect="always"
id="linearGradient4792">
<stop
style="stop-color:#000000;stop-opacity:1;"
offset="0"
id="stop4794" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop4796" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient4758">
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="0"
id="stop4760" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop4762" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient4722">
<stop
style="stop-color:#dfdfdf;stop-opacity:1"
offset="0"
id="stop4724" />
<stop
style="stop-color:#606060;stop-opacity:1"
offset="1"
id="stop4726" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient4635">
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="0"
id="stop4637" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop4639" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient4618">
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="0"
id="stop4620" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop4622" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient4488">
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="0"
id="stop4490" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop4492" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient4445">
<stop
style="stop-color:#606060;stop-opacity:1"
offset="0"
id="stop4447" />
<stop
style="stop-color:#343434;stop-opacity:1"
offset="1"
id="stop4449" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient4253">
<stop
style="stop-color:#000000;stop-opacity:1;"
offset="0"
id="stop4255" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop4257" />
</linearGradient>
<linearGradient
id="linearGradient3792"
inkscape:collect="always">
<stop
id="stop3794"
offset="0"
style="stop-color:#ffffff;stop-opacity:1" />
<stop
id="stop3796"
offset="1"
style="stop-color:#e7e7e7;stop-opacity:1" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3631">
<stop
style="stop-color:#000000;stop-opacity:1;"
offset="0"
id="stop3633" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop3635" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3475">
<stop
style="stop-color:#eeeeee;stop-opacity:1;"
offset="0"
id="stop3477" />
<stop
style="stop-color:#cbcbcb;stop-opacity:1"
offset="1"
id="stop3479" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3467">
<stop
style="stop-color:#e8e8e8;stop-opacity:1"
offset="0"
id="stop3469" />
<stop
style="stop-color:#888888;stop-opacity:0.53714287"
offset="1"
id="stop3471" />
</linearGradient>
<linearGradient
id="linearGradient3176"
inkscape:collect="always">
<stop
id="stop3178"
offset="0"
style="stop-color:#323232;stop-opacity:1;" />
<stop
id="stop3180"
offset="1"
style="stop-color:#000000;stop-opacity:1" />
</linearGradient>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3516"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.315637,39.880442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3518"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.315636,39.008928)"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3520"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-0.5000017,0.8660252)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3522"
gradientUnits="userSpaceOnUse"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3475"
id="linearGradient3524"
gradientUnits="userSpaceOnUse"
x1="62.646275"
y1="53.750923"
x2="52.066586"
y2="53.750923" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3526"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-0.5000017,0.8660252)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3467"
id="linearGradient3530"
gradientUnits="userSpaceOnUse"
x1="63.553711"
y1="16.056862"
x2="63.553711"
y2="63.136379" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3467"
id="linearGradient3609"
gradientUnits="userSpaceOnUse"
x1="63.553711"
y1="16.056862"
x2="63.553711"
y2="63.136379"
gradientTransform="matrix(0.8715135,3.655296e-8,-3.655296e-8,0.8715135,8.2476912,8.4795979)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3614"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.315637,39.880442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3792"
id="linearGradient3617"
gradientUnits="userSpaceOnUse"
x1="62.646275"
y1="53.750923"
x2="52.066586"
y2="53.750923"
gradientTransform="matrix(0.8513047,-0.4911732,0.4915009,0.850737,-22.138279,32.363934)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3625"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.315637,39.880442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3627"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.315636,39.008928)"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3631"
id="linearGradient3637"
x1="61.911907"
y1="72.456772"
x2="59.719414"
y2="73.709625"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(1.8541426,0,0,1.8529064,-52.138638,-62.025773)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3631"
id="linearGradient3641"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-5.0049887,0.680543,-0.6885831,-4.9465506,418.08891,395.15615)"
x1="61.911907"
y1="72.456772"
x2="59.719414"
y2="73.709625" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3467"
id="linearGradient3644"
gradientUnits="userSpaceOnUse"
x1="63.553711"
y1="16.056862"
x2="63.553711"
y2="63.136379"
gradientTransform="matrix(-0.8715135,3.655296e-8,3.655296e-8,0.8715135,120.39367,8.4795979)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3649"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.7547529,-0.4357568,-0.4357568,0.7547529,140.957,39.880442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3475"
id="linearGradient3652"
gradientUnits="userSpaceOnUse"
x1="62.646275"
y1="53.750923"
x2="52.066586"
y2="53.750923"
gradientTransform="matrix(-0.7547529,-0.4357568,-0.4357568,0.7547529,140.957,39.008928)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3655"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.7547529,-0.4357568,-0.4357568,0.7547529,140.957,38.380442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3657"
gradientUnits="userSpaceOnUse"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955"
gradientTransform="matrix(-0.7547529,-0.4357568,-0.4357568,0.7547529,140.957,37.508928)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3669"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.315637,39.880442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3671"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.315636,39.008928)"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3792"
id="linearGradient3802"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513047,-0.4911732,-0.4915009,0.850737,150.74175,32.363934)"
x1="62.646275"
y1="53.750923"
x2="52.066586"
y2="53.750923" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3838"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.7547529,-0.4357568,-0.4357568,0.7547529,140.957,40.880442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3847"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.013047,38.380442)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3849"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.013047,37.508928)"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3881"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.7547529,-0.4357568,-0.4357568,0.7547529,140.957,37.508928)"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient3887"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.7547529,-0.4357568,0.4357568,0.7547529,-12.013047,37.508928)"
x1="69.501228"
y1="109.56824"
x2="56.484062"
y2="117.84955" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient4006"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513047,-0.4911732,-0.4915009,0.850737,150.74175,33.346282)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient4085"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513047,-0.4911732,-0.4915009,0.850737,149.24175,36.346282)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3176"
id="linearGradient4087"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513047,-0.4911732,-0.4915009,0.850737,150.74175,33.346282)"
x1="63.245899"
y1="107.23933"
x2="58.32019"
y2="107.5107" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3792"
id="linearGradient4250"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513047,-0.4911732,-0.4915009,0.850737,150.74175,32.363934)"
x1="62.646275"
y1="53.750923"
x2="52.066586"
y2="53.750923" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4253"
id="linearGradient4259"
x1="65.414917"
y1="63.2187"
x2="71.566734"
y2="58.624897"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4445"
id="linearGradient4443"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.8513046,-0.4911732,0.4915009,0.850737,-21.796979,30.673174)"
x1="66.410789"
y1="111.09748"
x2="56.771309"
y2="111.40427" />
<clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath4455">
<path
style="fill:#343434;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:6;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="M 75.1875,70.0625 C 74.330819,70.112586 73.601545,70.703549 73.375,71.53125 L 71.21875,79.59375 C 71.09204,80.103085 71.170605,80.641816 71.4375,81.09375 L 89.625,112.5625 C 91.972816,116.62633 95.167933,119.72417 98.625,121.46875 C 102.08207,123.21333 105.97364,123.61751 109.15625,121.78125 C 112.33881,119.94502 113.935,116.36699 114.15625,112.5 C 114.3775,108.63301 113.28602,104.31505 110.9375,100.25 C 108.5891,96.185153 105.39577,93.080216 101.9375,91.34375 C 98.479225,89.607284 94.586162,89.2278 91.40625,91.0625 C 90.654039,91.496499 90.578013,91.480641 90,91.1875 C 89.421987,90.894359 88.429852,89.981353 87.25,88.34375 C 84.890296,85.068543 81.724299,79.185679 77.03125,71.0625 C 76.654739,70.40782 75.941586,70.021025 75.1875,70.0625 z M 97.9375,99.03125 C 99.847583,98.899901 103.29363,100.88292 105.4375,104.59375 C 106.75724,106.8781 107.33023,109.26773 107.25,111.125 C 107.16977,112.98227 106.50165,114.13243 105.59375,114.65625 C 104.68591,115.18004 103.39934,115.17251 101.75,114.3125 C 100.10066,113.45249 98.288767,111.75356 96.96875,109.46875 C 95.649001,107.18439 95.076021,104.76352 95.15625,102.90625 C 95.236479,101.04898 95.873353,99.898826 96.78125,99.375 C 97.154526,99.159631 97.520851,99.0599 97.9375,99.03125 z "
id="path4457"
sodipodi:nodetypes="cccccsssssssscccsssssssc" />
</clipPath>
<filter
inkscape:collect="always"
id="filter4475">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="0.47498194"
id="feGaussianBlur4477" />
</filter>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4488"
id="linearGradient4494"
x1="100.23751"
y1="84.952927"
x2="115.33315"
y2="111.09933"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(1.1304268,0,0,1.1304268,-12.155804,-11.996273)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4445"
id="linearGradient4572"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513046,-0.4911732,-0.4915009,0.850737,150.64081,30.673174)"
x1="66.410789"
y1="111.09748"
x2="56.771309"
y2="111.40427" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4488"
id="linearGradient4574"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-1.1304268,0,0,1.1304268,140.99964,-11.996273)"
x1="100.23751"
y1="84.952927"
x2="115.33315"
y2="111.09933" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4618"
id="linearGradient4624"
x1="39.66201"
y1="99.394554"
x2="32.5625"
y2="108.58216"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.7874752,0.1143529,-0.1388531,-0.6485276,62.315135,175.57221)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4445"
id="linearGradient4630"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513046,-0.4911732,-0.4915009,0.850737,150.14081,34.173174)"
x1="66.410789"
y1="111.09748"
x2="56.771309"
y2="111.40427" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4445"
id="linearGradient4632"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(-0.8513046,-0.4911732,-0.4915009,0.850737,150.64081,30.673174)"
x1="66.410789"
y1="111.09748"
x2="56.771309"
y2="111.40427" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4635"
id="linearGradient4641"
x1="24.636236"
y1="118.53715"
x2="24.636236"
y2="110.80067"
gradientUnits="userSpaceOnUse" />
<filter
inkscape:collect="always"
id="filter4691">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="0.41898454"
id="feGaussianBlur4693" />
</filter>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4635"
id="linearGradient4701"
gradientUnits="userSpaceOnUse"
x1="24.636236"
y1="118.53715"
x2="24.636236"
y2="110.80067" />
<filter
inkscape:collect="always"
id="filter4711">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="0.71627592"
id="feGaussianBlur4713" />
</filter>
<filter
inkscape:collect="always"
id="filter4715">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="0.71627592"
id="feGaussianBlur4717" />
</filter>
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient4722"
id="radialGradient4728"
cx="66"
cy="54"
fx="66.495979"
fy="53.140942"
r="2"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(1.0315502,-0.5959631,0.5955656,1.0308618,-34.242853,37.667027)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4758"
id="linearGradient4764"
x1="51.619904"
y1="81.644371"
x2="46.564438"
y2="99.975533"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(3.132137,-2.5057096)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient4792"
id="linearGradient4798"
x1="64.46875"
y1="67.044975"
x2="65.410522"
y2="80.970673"
gradientUnits="userSpaceOnUse" />
<filter
inkscape:collect="always"
id="filter4821">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="0.98653907"
id="feGaussianBlur4823" />
</filter>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
gridtolerance="10000"
guidetolerance="10"
objecttolerance="10"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="4.7890625"
inkscape:cx="64"
inkscape:cy="64"
inkscape:document-units="px"
inkscape:current-layer="g3690"
width="128px"
height="128px"
showgrid="false"
gridspacingx="2px"
gridspacingy="2px"
gridempspacing="4"
inkscape:grid-points="true"
showborder="false"
borderlayer="false"
inkscape:showpageshadow="false"
inkscape:window-width="794"
inkscape:window-height="730"
inkscape:window-x="0"
inkscape:window-y="0" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<g
id="g3690">
<path
style="opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4798);stroke-width:3.54751818;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4821)"
d="M 35.28125,14.46875 C 34.326418,15.020173 34.451101,16.184543 34.6875,16.78125 C 41.824923,35.39161 48.223262,52.347787 58.15625,70.75 L 56.625,73.21875 C 56.244936,72.547795 55.487111,72.144016 54.6875,72.21875 C 54.034234,72.275803 53.450423,72.649442 53.125,73.21875 C 48.962268,80.428813 46.167367,85.651204 44.09375,88.53125 C 43.056941,89.971273 42.203917,90.769641 41.75,91 C 41.296083,91.230359 41.306883,91.263851 40.6875,90.90625 C 37.787275,89.231804 34.269689,89.591888 31.15625,91.15625 C 28.042811,92.720612 25.164593,95.484072 23.0625,99.125 C 20.960291,102.76613 19.988354,106.64212 20.1875,110.125 C 20.386646,113.60788 21.847132,116.85528 24.75,118.53125 C 27.652918,120.20725 31.200269,119.82161 34.3125,118.25 C 37.424731,116.67839 40.273497,113.92116 42.375,110.28125 L 58.5,82.34375 C 58.674659,82.047999 58.772646,81.71253 58.78125,81.375 C 60.494006,80.173524 62.436255,79.043092 64.3125,78.25 C 66.301575,79.090643 68.394793,80.309785 70.1875,81.59375 C 70.216655,81.858831 70.304417,82.106865 70.4375,82.34375 L 86.5625,110.28125 C 88.664002,113.92116 91.544019,116.67839 94.65625,118.25 C 97.768481,119.82161 101.28458,120.20725 104.1875,118.53125 C 107.09037,116.85528 108.55085,113.60788 108.75,110.125 C 108.94915,106.64212 107.97721,102.76613 105.875,99.125 C 103.77291,95.484072 100.89469,92.720612 97.78125,91.15625 C 94.667811,89.591888 91.150225,89.231804 88.25,90.90625 C 87.630617,91.263851 87.641417,91.230359 87.1875,91 C 86.733583,90.769641 85.880559,89.971273 84.84375,88.53125 C 82.770133,85.651204 79.975232,80.428813 75.8125,73.21875 C 75.435989,72.56407 74.722836,72.177275 73.96875,72.21875 C 73.178739,72.268279 72.494718,72.772672 72.21875,73.5 L 70.5,70.75 C 80.43299,52.347791 86.83133,35.391608 93.96875,16.78125 C 94.205146,16.184543 94.329835,15.020174 93.375,14.46875 L 64.3125,60.875 L 35.28125,14.46875 z M 34.78125,98.375 C 35.1163,98.398055 35.384215,98.481147 35.6875,98.65625 C 36.408735,99.072656 36.962348,100.02888 37.03125,101.625 C 
37.100152,103.22112 36.619402,105.28826 35.46875,107.28125 C 34.317829,109.27471 32.759661,110.76121 31.34375,111.5 C 29.927839,112.23879 28.846185,112.22887 28.125,111.8125 C 27.40377,111.3961 26.850153,110.47112 26.78125,108.875 C 26.712347,107.27888 27.193102,105.18048 28.34375,103.1875 C 30.21285,99.950123 33.224696,98.267893 34.78125,98.375 z M 94.15625,98.375 C 95.712802,98.267893 98.724654,99.950125 100.59375,103.1875 C 101.7444,105.18048 102.22515,107.27888 102.15625,108.875 C 102.08735,110.47112 101.56498,111.3961 100.84375,111.8125 C 100.12257,112.22887 99.04091,112.23879 97.625,111.5 C 96.20909,110.76121 94.650922,109.27471 93.5,107.28125 C 92.349349,105.28826 91.837348,103.22112 91.90625,101.625 C 91.975152,100.02888 92.528765,99.072656 93.25,98.65625 C 93.553281,98.48115 93.8212,98.398055 94.15625,98.375 z "
id="path3483"
transform="matrix(1.1279249,0,0,1.1271729,-8.2471649,-11.605871)" />
<path
sodipodi:nodetypes="cccccsssssssscccsssssssc"
id="path4566"
d="M 53.65633,70.0625 C 54.51301,70.112586 55.24229,70.703549 55.46883,71.53125 L 57.62508,79.59375 C 57.75179,80.103085 57.67323,80.641816 57.40633,81.09375 L 39.218832,112.5625 C 36.871016,116.62633 33.675899,119.72417 30.218832,121.46875 C 26.761762,123.21333 22.870192,123.61751 19.687582,121.78125 C 16.505022,119.94502 14.908832,116.36699 14.687582,112.5 C 14.466332,108.63301 15.557812,104.31505 17.906332,100.25 C 20.254732,96.185153 23.448062,93.080216 26.906332,91.34375 C 30.364607,89.607284 34.25767,89.2278 37.437582,91.0625 C 38.189793,91.496499 38.265819,91.480641 38.843832,91.1875 C 39.421845,90.894359 40.41398,89.981353 41.593832,88.34375 C 43.95354,85.068543 47.11953,79.185679 51.81258,71.0625 C 52.18909,70.40782 52.90225,70.021025 53.65633,70.0625 z M 30.906332,99.03125 C 28.996249,98.899901 25.550202,100.88292 23.406332,104.59375 C 22.086592,106.8781 21.513602,109.26773 21.593832,111.125 C 21.674062,112.98227 22.342182,114.13243 23.250082,114.65625 C 24.157922,115.18004 25.444492,115.17251 27.093832,114.3125 C 28.743172,113.45249 30.555065,111.75356 31.875082,109.46875 C 33.194831,107.18439 33.767811,104.76352 33.687582,102.90625 C 33.607353,101.04898 32.970479,99.898826 32.062582,99.375 C 31.689306,99.159631 31.322981,99.0599 30.906332,99.03125 z "
style="fill:url(#linearGradient4572);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
<path
sodipodi:nodetypes="cccscssssccsssscc"
id="path4568"
d="M 55.65241,70.312928 C 56.57508,70.366873 57.35134,71.011135 57.59533,71.902591 L 57.66599,72.185197 C 57.29393,71.760421 56.80725,71.477828 56.21763,71.443355 C 55.40545,71.398686 54.60957,71.798027 54.20405,72.50313 C 49.14952,81.252005 45.75918,87.592357 43.217718,91.119846 C 41.946986,92.883587 40.872884,93.877473 40.250348,94.193194 C 39.627813,94.508912 39.541489,94.519318 38.731337,94.051891 C 35.306492,92.075871 31.1164,92.499608 27.391742,94.369823 C 23.667082,96.240042 20.241732,99.5652 17.712462,103.94313 L 17.147252,102.8127 C 19.676522,98.434769 23.101862,95.109615 26.826532,93.239397 C 30.551186,91.369181 34.741278,90.945444 38.166123,92.921464 C 38.976275,93.388891 39.062599,93.378485 39.685134,93.062767 C 40.307671,92.747046 41.381773,91.75316 42.652505,89.989419 C 45.19396,86.461931 48.58431,80.121578 53.63884,71.372703 C 54.04436,70.667601 54.84024,70.268259 55.65241,70.312928 z "
style="opacity:0.77227723;fill:url(#linearGradient4574);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
<path
clip-path="url(#clipPath4455)"
style="opacity:0.70297032;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4475)"
d="M 75.1875,70.0625 C 74.330819,70.112586 73.601545,70.703549 73.375,71.53125 L 71.21875,79.59375 C 71.09204,80.103085 71.170605,80.641816 71.4375,81.09375 L 89.625,112.5625 C 91.972816,116.62633 95.167933,119.72417 98.625,121.46875 C 102.08207,123.21333 105.97364,123.61751 109.15625,121.78125 C 112.33881,119.94502 113.935,116.36699 114.15625,112.5 C 114.3775,108.63301 113.28602,104.31505 110.9375,100.25 C 108.5891,96.185153 105.39577,93.080216 101.9375,91.34375 C 98.479225,89.607284 94.586162,89.2278 91.40625,91.0625 C 90.654039,91.496499 90.578013,91.480641 90,91.1875 C 89.421987,90.894359 88.429852,89.981353 87.25,88.34375 C 84.890296,85.068543 81.724299,79.185679 77.03125,71.0625 C 76.654739,70.40782 75.941586,70.021025 75.1875,70.0625 z M 97.9375,99.03125 C 99.847583,98.899901 103.29363,100.88292 105.4375,104.59375 C 106.75724,106.8781 107.33023,109.26773 107.25,111.125 C 107.16977,112.98227 106.50165,114.13243 105.59375,114.65625 C 104.68591,115.18004 103.39934,115.17251 101.75,114.3125 C 100.10066,113.45249 98.288767,111.75356 96.96875,109.46875 C 95.649001,107.18439 95.076021,104.76352 95.15625,102.90625 C 95.236479,101.04898 95.873353,99.898826 96.78125,99.375 C 97.154526,99.159631 97.520851,99.0599 97.9375,99.03125 z "
id="path4570"
sodipodi:nodetypes="cccccsssssssscccsssssssc"
transform="matrix(-1,0,0,1,128.84383,0)" />
<path
style="fill:url(#linearGradient4443);fill-opacity:1.0;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="M 75.1875,70.0625 C 74.330819,70.112586 73.601545,70.703549 73.375,71.53125 L 71.21875,79.59375 C 71.09204,80.103085 71.170605,80.641816 71.4375,81.09375 L 89.625,112.5625 C 91.972816,116.62633 95.167933,119.72417 98.625,121.46875 C 102.08207,123.21333 105.97364,123.61751 109.15625,121.78125 C 112.33881,119.94502 113.935,116.36699 114.15625,112.5 C 114.3775,108.63301 113.28602,104.31505 110.9375,100.25 C 108.5891,96.185153 105.39577,93.080216 101.9375,91.34375 C 98.479225,89.607284 94.586162,89.2278 91.40625,91.0625 C 90.654039,91.496499 90.578013,91.480641 90,91.1875 C 89.421987,90.894359 88.429852,89.981353 87.25,88.34375 C 84.890296,85.068543 81.724299,79.185679 77.03125,71.0625 C 76.654739,70.40782 75.941586,70.021025 75.1875,70.0625 z M 97.9375,99.03125 C 99.847583,98.899901 103.29363,100.88292 105.4375,104.59375 C 106.75724,106.8781 107.33023,109.26773 107.25,111.125 C 107.16977,112.98227 106.50165,114.13243 105.59375,114.65625 C 104.68591,115.18004 103.39934,115.17251 101.75,114.3125 C 100.10066,113.45249 98.288767,111.75356 96.96875,109.46875 C 95.649001,107.18439 95.076021,104.76352 95.15625,102.90625 C 95.236479,101.04898 95.873353,99.898826 96.78125,99.375 C 97.154526,99.159631 97.520851,99.0599 97.9375,99.03125 z "
id="path3845"
sodipodi:nodetypes="cccccsssssssscccsssssssc" />
<path
style="opacity:0.77227723;fill:url(#linearGradient4494);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="M 73.191419,70.312928 C 72.268749,70.366873 71.492489,71.011135 71.248498,71.902591 L 71.177846,72.185197 C 71.549903,71.760421 72.036582,71.477828 72.626206,71.443355 C 73.438379,71.398686 74.234264,71.798027 74.639778,72.50313 C 79.694311,81.252005 83.084654,87.592357 85.626114,91.119846 C 86.896846,92.883587 87.970948,93.877473 88.593484,94.193194 C 89.216019,94.508912 89.302343,94.519318 90.112495,94.051891 C 93.53734,92.075871 97.727432,92.499608 101.45209,94.369823 C 105.17675,96.240042 108.6021,99.5652 111.13137,103.94313 L 111.69658,102.8127 C 109.16731,98.434769 105.74197,95.109615 102.0173,93.239397 C 98.292646,91.369181 94.102554,90.945444 90.677709,92.921464 C 89.867557,93.388891 89.781233,93.378485 89.158698,93.062767 C 88.536161,92.747046 87.462059,91.75316 86.191327,89.989419 C 83.649868,86.461931 80.259524,80.121578 75.204992,71.372703 C 74.799477,70.667601 74.003592,70.268259 73.191419,70.312928 z "
id="path4479"
sodipodi:nodetypes="cccscssssccsssscc" />
<path
sodipodi:nodetypes="cccccsssssssscccsssssssc"
id="path4451"
d="M 75.1875,70.0625 C 74.330819,70.112586 73.601545,70.703549 73.375,71.53125 L 71.21875,79.59375 C 71.09204,80.103085 71.170605,80.641816 71.4375,81.09375 L 89.625,112.5625 C 91.972816,116.62633 95.167933,119.72417 98.625,121.46875 C 102.08207,123.21333 105.97364,123.61751 109.15625,121.78125 C 112.33881,119.94502 113.935,116.36699 114.15625,112.5 C 114.3775,108.63301 113.28602,104.31505 110.9375,100.25 C 108.5891,96.185153 105.39577,93.080216 101.9375,91.34375 C 98.479225,89.607284 94.586162,89.2278 91.40625,91.0625 C 90.654039,91.496499 90.578013,91.480641 90,91.1875 C 89.421987,90.894359 88.429852,89.981353 87.25,88.34375 C 84.890296,85.068543 81.724299,79.185679 77.03125,71.0625 C 76.654739,70.40782 75.941586,70.021025 75.1875,70.0625 z M 97.9375,99.03125 C 99.847583,98.899901 103.29363,100.88292 105.4375,104.59375 C 106.75724,106.8781 107.33023,109.26773 107.25,111.125 C 107.16977,112.98227 106.50165,114.13243 105.59375,114.65625 C 104.68591,115.18004 103.39934,115.17251 101.75,114.3125 C 100.10066,113.45249 98.288767,111.75356 96.96875,109.46875 C 95.649001,107.18439 95.076021,104.76352 95.15625,102.90625 C 95.236479,101.04898 95.873353,99.898826 96.78125,99.375 C 97.154526,99.159631 97.520851,99.0599 97.9375,99.03125 z "
style="fill:none;fill-opacity:1.0;fill-rule:evenodd;stroke:#000000;stroke-width:3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4475);opacity:0.7029703"
clip-path="url(#clipPath4455)" />
<path
style="fill:url(#linearGradient3802);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 97.96087,1.9111929 L 53.629372,72.750318 L 52.479384,84.564291 C 54.445388,81.161344 61.782615,76.320141 67.153838,74.881885 C 81.238527,50.502756 89.411871,28.739567 98.680477,4.5950145 C 98.954672,3.9035524 99.068368,2.5501817 97.96087,1.9111929 z "
id="path3506"
sodipodi:nodetypes="cccccc" />
<path
sodipodi:nodetypes="cc"
id="path4269"
d="M 97.692496,2.3849918 L 61.738989,59.428222"
style="fill:none;fill-rule:evenodd;stroke:#ffffff;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
style="fill:url(#linearGradient3637);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 59.75 65.65625 L 57.71875 66.21875 L 56.28125 68.5 L 60.59375 77.71875 C 62.419602 76.674902 64.33553 75.770565 66.09375 75.1875 L 59.75 65.65625 z "
id="path3629" />
<path
style="fill:url(#linearGradient4259);fill-opacity:1.0;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 67.1875 51.0625 L 59 64.15625 L 66.1875 75.15625 C 66.513817 75.049746 66.842006 74.959145 67.15625 74.875 C 69.715966 70.444398 72.069962 66.100297 74.28125 61.8125 L 67.1875 51.0625 z "
id="path4248" />
<path
sodipodi:nodetypes="cccccc"
id="path2160"
d="M 30.642611,1.9111929 L 74.974109,72.750318 L 76.124096,84.564291 C 74.158093,81.161344 66.820866,76.320141 61.449644,74.881885 C 47.364955,50.502756 39.191609,28.739567 29.923004,4.5950145 C 29.648808,3.9035524 29.535112,2.5501817 30.642611,1.9111929 z "
style="fill:url(#linearGradient3617);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
style="fill:none;fill-rule:evenodd;stroke:#ffffff;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 30.738989,2.3849918 L 64.192496,55.928222"
id="path4267"
sodipodi:nodetypes="cc" />
<path
transform="matrix(1.4403715,-0.8310572,0.8315989,1.4394331,-75.999063,45.513227)"
d="M 68 54 A 2 2 0 1 1 64,54 A 2 2 0 1 1 68 54 z"
sodipodi:ry="2"
sodipodi:rx="2"
sodipodi:cy="54"
sodipodi:cx="66"
id="path3146"
style="opacity:0.96000001;fill:url(#radialGradient4728);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:4;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
sodipodi:type="arc" />
<image
y="0"
x="160"
id="image4278"
height="128"
width="128"
sodipodi:absref="/home/david/Oxygen/trunk/32x32/actions/edit-copy.png"
xlink:href="/home/david/Oxygen/trunk/32x32/actions/edit-copy.png" />
<image
y="0"
x="288"
id="image4288"
height="128"
width="128"
sodipodi:absref="/home/david/Oxygen/trunk/32x32/actions/edit-paste.png"
xlink:href="/home/david/Oxygen/trunk/32x32/actions/edit-paste.png" />
<path
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;opacity:0.70792079;filter:url(#filter4715)"
d="M 57.53125 79.25 L 39.71875 110.0625 C 37.370936 114.12633 34.175817 117.22417 30.71875 118.96875 C 27.26168 120.71333 23.37011 121.11751 20.1875 119.28125 C 17.00494 117.44502 15.40875 113.86699 15.1875 110 C 15.119229 108.80676 15.194921 107.57094 15.375 106.3125 C 14.812237 108.42607 14.574653 110.52767 14.6875 112.5 C 14.90875 116.36699 16.50494 119.94502 19.6875 121.78125 C 22.87011 123.61751 26.76168 123.21333 30.21875 121.46875 C 33.675817 119.72417 36.870936 116.62633 39.21875 112.5625 L 57.40625 81.09375 C 57.67315 80.641816 57.75171 80.103085 57.625 79.59375 L 57.53125 79.25 z M 31.40625 96.53125 C 29.496167 96.399901 26.05012 98.38292 23.90625 102.09375 C 22.840787 103.93797 22.266248 105.85407 22.125 107.5 C 22.422918 106.54782 22.850187 105.55624 23.40625 104.59375 C 25.55012 100.88292 28.996167 98.899901 30.90625 99.03125 C 31.322899 99.0599 31.689226 99.159631 32.0625 99.375 C 32.970397 99.898826 33.607273 101.04898 33.6875 102.90625 C 33.703141 103.26833 33.690955 103.66277 33.65625 104.0625 C 34.065399 102.74599 34.233925 101.48097 34.1875 100.40625 C 34.107273 98.54898 33.470397 97.398826 32.5625 96.875 C 32.189226 96.659631 31.822899 96.5599 31.40625 96.53125 z "
id="path4576" />
<path
id="path4609"
d="M 57.53125,79.25 L 39.71875,110.0625 C 37.370936,114.12633 34.175817,117.22417 30.71875,118.96875 C 27.26168,120.71333 23.37011,121.11751 20.1875,119.28125 C 17.00494,117.44502 15.40875,113.86699 15.1875,110 C 15.119229,108.80676 15.194921,107.57094 15.375,106.3125 C 14.812237,108.42607 14.574653,110.52767 14.6875,112.5 C 14.90875,116.36699 16.50494,119.94502 19.6875,121.78125 C 22.87011,123.61751 26.76168,123.21333 30.21875,121.46875 C 33.675817,119.72417 36.870936,116.62633 39.21875,112.5625 L 57.40625,81.09375 C 57.67315,80.641816 57.75171,80.103085 57.625,79.59375 L 57.53125,79.25 z M 31.40625,96.53125 C 29.496167,96.399901 26.05012,98.38292 23.90625,102.09375 C 22.840787,103.93797 22.266248,105.85407 22.125,107.5 C 22.422918,106.54782 22.850187,105.55624 23.40625,104.59375 C 25.55012,100.88292 28.996167,98.899901 30.90625,99.03125 C 31.322899,99.0599 31.689226,99.159631 32.0625,99.375 C 32.970397,99.898826 33.607273,101.04898 33.6875,102.90625 C 33.703141,103.26833 33.690955,103.66277 33.65625,104.0625 C 34.065399,102.74599 34.233925,101.48097 34.1875,100.40625 C 34.107273,98.54898 33.470397,97.398826 32.5625,96.875 C 32.189226,96.659631 31.822899,96.5599 31.40625,96.53125 z "
style="opacity:0.70792081;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4711)"
transform="matrix(-1,0,0,1,128.84431,0)" />
<path
style="opacity:0.48514851;fill:url(#linearGradient4641);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4691)"
d="M 33.1875,106.4375 C 32.888012,107.43505 32.455199,108.46448 31.875,109.46875 C 30.554983,111.75356 28.74309,113.45249 27.09375,114.3125 C 25.44441,115.17251 24.15784,115.18004 23.25,114.65625 C 22.3421,114.13243 21.67398,112.98227 21.59375,111.125 C 21.212006,112.3879 21.048791,113.58424 21.09375,114.625 C 21.17398,116.48227 21.8421,117.63243 22.75,118.15625 C 23.65784,118.68004 24.94441,118.67251 26.59375,117.8125 C 28.24309,116.95249 30.054983,115.25356 31.375,112.96875 C 32.687576,110.6968 33.260229,108.29075 33.1875,106.4375 z "
id="path4626"
sodipodi:nodetypes="cssscssssc"
transform="matrix(1.6395402,0,0,1.5188129,-15.9999,-58.979717)" />
<path
transform="matrix(-1.6395402,0,0,1.5188129,144.99452,-58.979717)"
sodipodi:nodetypes="cssscssssc"
id="path4699"
d="M 33.1875,106.4375 C 32.888012,107.43505 32.455199,108.46448 31.875,109.46875 C 30.554983,111.75356 28.74309,113.45249 27.09375,114.3125 C 25.44441,115.17251 24.15784,115.18004 23.25,114.65625 C 22.3421,114.13243 21.67398,112.98227 21.59375,111.125 C 21.212006,112.3879 21.048791,113.58424 21.09375,114.625 C 21.17398,116.48227 21.8421,117.63243 22.75,118.15625 C 23.65784,118.68004 24.94441,118.67251 26.59375,117.8125 C 28.24309,116.95249 30.054983,115.25356 31.375,112.96875 C 32.687576,110.6968 33.260229,108.29075 33.1875,106.4375 z "
style="opacity:0.48514851;fill:url(#linearGradient4701);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:3.99999976;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4691)" />
<path
style="opacity:0.77722772;fill:none;fill-rule:evenodd;stroke:#ffffff;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 65.770092,66.282577 L 62.183456,70.557029"
id="path4730" />
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 43 KiB

3302
imgsrc/edit-paste.svg Normal file

File diff suppressed because it is too large Load Diff

After

Width:  |  Height:  |  Size: 88 KiB

View File

@ -1,722 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://web.resource.org/cc/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="128"
height="128"
id="svg2606"
sodipodi:version="0.32"
inkscape:version="0.45.1"
version="1.0"
sodipodi:docname="edit-undo.svgz"
inkscape:output_extension="org.inkscape.output.svgz.inkscape"
sodipodi:docbase="/home/david/oxygen/trunk/scalable/actions"
inkscape:export-filename="edit-undo.png"
inkscape:export-xdpi="11.25"
inkscape:export-ydpi="11.25">
<defs
id="defs2608">
<linearGradient
inkscape:collect="always"
id="linearGradient3342">
<stop
style="stop-color:#000000;stop-opacity:1;"
offset="0"
id="stop3344" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop3347" />
</linearGradient>
<linearGradient
inkscape:collect="always"
id="linearGradient3326">
<stop
style="stop-color:#000000;stop-opacity:1;"
offset="0"
id="stop3328" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop3330" />
</linearGradient>
<linearGradient
id="linearGradient3825">
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="0"
id="stop3827" />
<stop
id="stop3833"
offset="0.5"
style="stop-color:#ffffff;stop-opacity:0.18705036;" />
<stop
style="stop-color:#ffffff;stop-opacity:1;"
offset="1"
id="stop3829" />
</linearGradient>
<linearGradient
id="linearGradient3751">
<stop
style="stop-color:#beba2c;stop-opacity:1;"
offset="0"
id="stop3753" />
<stop
style="stop-color:#b6be2c;stop-opacity:0;"
offset="1"
id="stop3755" />
</linearGradient>
<linearGradient
id="linearGradient3528">
<stop
style="stop-color:#eaf209;stop-opacity:1;"
offset="0"
id="stop3530" />
<stop
style="stop-color:#c7c634;stop-opacity:0;"
offset="1"
id="stop3532" />
</linearGradient>
<linearGradient
id="linearGradient3295">
<stop
style="stop-color:#fffe63;stop-opacity:1;"
offset="0"
id="stop3297" />
<stop
style="stop-color:#ffffff;stop-opacity:0;"
offset="1"
id="stop3299" />
</linearGradient>
<linearGradient
id="linearGradient3202">
<stop
style="stop-color:#fcff9c;stop-opacity:1;"
offset="0"
id="stop3204" />
<stop
style="stop-color:#c1a965;stop-opacity:0;"
offset="1"
id="stop3206" />
</linearGradient>
<radialGradient
id="XMLID_4_"
cx="48"
cy="-0.2148"
r="55.148"
gradientTransform="matrix(0.9792,0,0,0.9725,133.0002,20.8762)"
gradientUnits="userSpaceOnUse">
<stop
offset="0"
style="stop-color:#cfd13d;stop-opacity:1;"
id="stop3082" />
<stop
offset="1"
style="stop-color:#db8900;stop-opacity:1;"
id="stop3090" />
</radialGradient>
<linearGradient
id="linearGradient3260"
inkscape:collect="always">
<stop
id="stop3262"
offset="0"
style="stop-color:#ffffff;stop-opacity:1;" />
<stop
id="stop3264"
offset="1"
style="stop-color:#ffffff;stop-opacity:0;" />
</linearGradient>
<linearGradient
id="linearGradient3344">
<stop
id="stop3346"
offset="0"
style="stop-color:#fdff63;stop-opacity:1;" />
<stop
id="stop3348"
offset="1"
style="stop-color:#ffffff;stop-opacity:0;" />
</linearGradient>
<linearGradient
id="linearGradient3449">
<stop
id="stop3451"
offset="0"
style="stop-color:#000000;stop-opacity:1;" />
<stop
id="stop3453"
offset="1"
style="stop-color:#000000;stop-opacity:0;" />
</linearGradient>
<linearGradient
inkscape:collect="always"
xlink:href="#XMLID_4_"
id="linearGradient3516"
x1="147.09375"
y1="33.40625"
x2="8.083992"
y2="123.90625"
gradientUnits="userSpaceOnUse" />
<radialGradient
inkscape:collect="always"
xlink:href="#XMLID_4_"
id="radialGradient3524"
cx="67.09375"
cy="116.90625"
fx="67.09375"
fy="116.90625"
r="56"
gradientUnits="userSpaceOnUse" />
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3528"
id="radialGradient3535"
cx="99.726295"
cy="27.418272"
fx="64.689766"
fy="68.231934"
r="56"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.693735,5.8671246e-2,-3.6242796e-2,0.4285387,33.939389,26.8809)" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3295"
id="linearGradient3548"
x1="75.09375"
y1="4.5317035"
x2="75.09375"
y2="80.172485"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3260"
id="linearGradient3581"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(4.2161108e-3,1.9705695e-3)"
x1="75.09375"
y1="4.5317035"
x2="75.09375"
y2="80.172485" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3295"
id="linearGradient3613"
x1="208.59375"
y1="130.40625"
x2="208.59375"
y2="63.426777"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-137,0)" />
<filter
inkscape:collect="always"
id="filter3639">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="1.0580524"
id="feGaussianBlur3641" />
</filter>
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3449"
id="radialGradient3654"
cx="-10.165252"
cy="66.906013"
fx="-10.165252"
fy="66.906013"
r="59.995121"
gradientTransform="matrix(0.4582893,-2.1035589e-8,4.5903973e-8,1.0000813,20.447953,-5.1974351e-3)"
gradientUnits="userSpaceOnUse" />
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3449"
id="radialGradient3658"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.2676699,0.521376,-0.7037472,0.3612977,108.99386,-36.062981)"
cx="167.67001"
cy="80.404922"
fx="167.67001"
fy="80.404922"
r="59.995121" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3260"
id="linearGradient3676"
x1="120.0625"
y1="12.569496"
x2="125.30366"
y2="14.444496"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect" />
<filter
inkscape:collect="always"
x="-0.15096202"
width="1.301924"
y="-0.13732364"
height="1.2746473"
id="filter3738">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="0.39257441"
id="feGaussianBlur3740" />
</filter>
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3449"
id="radialGradient3744"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.982366,1.671718e-2,-3.5801148e-3,0.2103843,-18.56344,30.477792)"
cx="72.684891"
cy="48.228905"
fx="74.871155"
fy="26.862719"
r="59.995121" />
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3751"
id="radialGradient3757"
cx="66.01458"
cy="126.69183"
fx="66.01458"
fy="126.69183"
r="59.99512"
gradientTransform="matrix(0.675025,0,0,0.3583625,19.527377,41.004647)"
gradientUnits="userSpaceOnUse" />
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3260"
id="radialGradient3767"
cx="64.088867"
cy="7.4108429"
fx="64.088867"
fy="7.4108429"
r="59.995121"
gradientTransform="matrix(0.3093869,0,0,0.4779247,44.260611,3.8644223)"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3344"
id="linearGradient3771"
gradientUnits="userSpaceOnUse"
spreadMethod="reflect"
x1="120.0625"
y1="12.569496"
x2="125.30366"
y2="14.444496" />
<filter
inkscape:collect="always"
id="filter3438">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="1.3342697"
id="feGaussianBlur3440" />
</filter>
<filter
inkscape:collect="always"
id="filter3630">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="0.89883985"
id="feGaussianBlur3632" />
</filter>
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3825"
id="radialGradient2361"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.6484284,0.1017206,-3.1257154e-2,0.1992521,-4.56257,53.15916)"
cx="-112.17241"
cy="118.60459"
fx="-113.14772"
fy="59.708473"
r="59.99512" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient2363"
gradientUnits="userSpaceOnUse"
x1="-151.43935"
y1="37.68198"
x2="-152.26776"
y2="57.25" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient2365"
gradientUnits="userSpaceOnUse"
x1="-132.51041"
y1="39.803303"
x2="-158.92462"
y2="72.881729" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient2367"
gradientUnits="userSpaceOnUse"
x1="-83.012932"
y1="44.753052"
x2="-158.92462"
y2="72.881729" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3349"
x1="-73"
y1="105.625"
x2="-163"
y2="86.125"
gradientUnits="userSpaceOnUse" />
<filter
inkscape:collect="always"
x="-0.087741371"
width="1.1754827"
y="-0.10211017"
height="1.2042203"
id="filter3363">
<feGaussianBlur
inkscape:collect="always"
stdDeviation="3.0526685"
id="feGaussianBlur3365" />
</filter>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3372"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-40.5,-1.5)"
x1="-83.593941"
y1="137.13324"
x2="-138.0043"
y2="92.603989" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3376"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-40.5,-1.5)"
x1="-61.802711"
y1="99.979607"
x2="-136.51074"
y2="112.70422" />
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3825"
id="radialGradient3388"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.6484284,0.1017206,-3.1257154e-2,0.1992521,-4.56257,53.15916)"
cx="-112.17241"
cy="118.60459"
fx="-113.14772"
fy="59.708473"
r="59.99512" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient3390"
gradientUnits="userSpaceOnUse"
x1="-151.43935"
y1="37.68198"
x2="-152.26776"
y2="57.25" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient3392"
gradientUnits="userSpaceOnUse"
x1="-132.51041"
y1="39.803303"
x2="-158.92462"
y2="72.881729" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient3394"
gradientUnits="userSpaceOnUse"
x1="-83.012932"
y1="44.753052"
x2="-158.92462"
y2="72.881729" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3396"
gradientUnits="userSpaceOnUse"
x1="-73"
y1="105.625"
x2="-163"
y2="86.125" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3398"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-40.5,-1.5)"
x1="-83.593941"
y1="137.13324"
x2="-138.0043"
y2="92.603989" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3400"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-40.5,-1.5)"
x1="-61.802711"
y1="99.979607"
x2="-136.51074"
y2="112.70422" />
<radialGradient
inkscape:collect="always"
xlink:href="#linearGradient3825"
id="radialGradient3422"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.6484284,0.1017206,-3.1257154e-2,0.1992521,-4.56257,53.15916)"
cx="-112.17241"
cy="118.60459"
fx="-113.14772"
fy="59.708473"
r="59.99512" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient3424"
gradientUnits="userSpaceOnUse"
x1="-151.43935"
y1="37.68198"
x2="-152.26776"
y2="57.25" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient3426"
gradientUnits="userSpaceOnUse"
x1="-132.51041"
y1="39.803303"
x2="-158.92462"
y2="72.881729" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3326"
id="linearGradient3428"
gradientUnits="userSpaceOnUse"
x1="-83.012932"
y1="44.753052"
x2="-158.92462"
y2="72.881729" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3430"
gradientUnits="userSpaceOnUse"
x1="-73"
y1="105.625"
x2="-163"
y2="86.125" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3432"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-40.5,-1.5)"
x1="-83.593941"
y1="137.13324"
x2="-138.0043"
y2="92.603989" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3342"
id="linearGradient3434"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-40.5,-1.5)"
x1="-61.802711"
y1="99.979607"
x2="-136.51074"
y2="112.70422" />
<mask
maskUnits="userSpaceOnUse"
id="mask3402">
<g
id="g3404"
transform="translate(167.50257,-3.755156e-3)">
<g
id="g3406"
transform="translate(80.51637,30.885255)">
<path
style="opacity:1;fill:url(#radialGradient3422);fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1"
d="M -184.42232,-32.47243 C -217.54751,-32.47243 -248.42232,-0.097625 -248.42232,33.02757 C -248.42232,66.15276 -217.04751,97.02757 -183.92232,97.02757 C -153.6332,97.02757 -128.58571,70.81131 -124.51607,41.68382 L -159.54732,36.65257 C -161.172,48.6137 -171.47739,57.62132 -183.92232,57.62132 C -197.49395,57.62132 -206.01607,46.0992 -206.01607,32.52757 C -206.01607,18.955936 -199.99395,12.43382 -186.42232,12.43382 C -179.6365,12.433819 -176.50103,10.198864 -172.04732,14.65257 L -176.8745,25.979749 C -178.93037,28.035619 -179.11822,29.285529 -178.55411,30.595278 C -178.04554,31.776057 -177.03338,33.12132 -174.34438,33.12132 L -130.39107,33.12132 C -126.54518,33.12132 -123.93208,30.466941 -123.93208,26.871189 L -124.00095,-17.206829 C -124.00095,-19.687584 -124.90346,-21.050058 -126.18242,-21.556444 C -127.49674,-22.076829 -129.21563,-21.679122 -131.28951,-19.605244 L -141.48482,-19.40993 C -152.34417,-30.269281 -167.85972,-32.47243 -184.42232,-32.47243 z "
id="path3408"
sodipodi:nodetypes="cssccsssccsccccsccc" />
<path
sodipodi:nodetypes="ccccc"
style="fill:url(#linearGradient3424);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3438)"
d="M -161.5,34.5 C -162,37 -180,54 -180,54 L -132.5,80 L -112,38.5 L -161.5,34.5 z "
id="path3410" />
<path
sodipodi:nodetypes="ccccc"
id="path3412"
d="M -161.5,34.5 C -162,37 -181.27817,54.389087 -181.27817,54.389087 L -151.62742,97.591883 L -112,38.5 L -161.5,34.5 z "
style="fill:url(#linearGradient3426);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3438)" />
<path
style="fill:url(#linearGradient3428);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3438)"
d="M -161.5,34.5 C -162,37 -181.27817,54.389087 -181.27817,54.389087 L -151.62742,97.591883 L -112,38.5 L -161.5,34.5 z "
id="path3414"
sodipodi:nodetypes="ccccc" />
</g>
<rect
y="69.75"
x="-119"
height="71.75"
width="83.5"
id="rect3416"
style="opacity:1;fill:url(#linearGradient3430);fill-opacity:1;stroke:none;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:1.08779998;stroke-opacity:1;filter:url(#filter3363)" />
<path
sodipodi:nodetypes="ccccc"
id="path3418"
d="M -159.5,68.25 L -39.138259,55.983708 L -93.453327,162.55286 L -197.79465,128.96507 L -159.5,68.25 z "
style="fill:url(#linearGradient3432);fill-opacity:1;stroke:none;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1;filter:url(#filter3363)"
transform="matrix(0.6393762,0.7688941,-0.7688941,0.6393762,37.597642,128.08723)" />
<path
transform="matrix(0.6393762,0.7688941,-0.7688941,0.6393762,37.597642,128.08723)"
style="fill:url(#linearGradient3434);fill-opacity:1;stroke:none;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1;filter:url(#filter3363)"
d="M -159.5,68.25 L -39.138259,55.983708 L -93.453327,162.55286 L -197.79465,128.96507 L -159.5,68.25 z "
id="path3420"
sodipodi:nodetypes="ccccc" />
</g>
</mask>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3751"
id="linearGradient3565"
x1="-267.47665"
y1="18.103027"
x2="-33.476654"
y2="18.103027"
gradientUnits="userSpaceOnUse" />
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient3295"
id="linearGradient3567"
gradientUnits="userSpaceOnUse"
gradientTransform="translate(-137,0)"
x1="208.59375"
y1="130.40625"
x2="208.59375"
y2="63.426777" />
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="4.6484375"
inkscape:cx="64"
inkscape:cy="64"
inkscape:document-units="px"
inkscape:current-layer="layer1"
width="128px"
height="128px"
gridspacingx="4px"
gridspacingy="4px"
gridempspacing="2"
showgrid="false"
inkscape:grid-points="true"
showguides="true"
inkscape:guide-bbox="true"
inkscape:window-width="748"
inkscape:window-height="681"
inkscape:window-x="526"
inkscape:window-y="51" />
<metadata
id="metadata2611">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Livello 1"
inkscape:groupmode="layer"
id="layer1">
<g
id="g3835"
mask="url(#mask3402)"
transform="matrix(-1,0,0,1,128.17774,0)">
<path
sodipodi:nodetypes="cssccsssccsccccsccc"
id="rect3204"
d="M 64.09375,3.90625 C 30.968558,3.9062499 4.0937499,30.781055 4.09375,63.90625 C 4.0937501,97.031442 30.96856,123.90625 64.09375,123.90625 C 94.382866,123.90625 119.43036,101.68999 123.5,72.5625 L 88.46875,67.53125 C 86.844066,79.492379 76.538676,88.5 64.09375,88.5 C 50.522122,88.499999 39.5,77.477881 39.5,63.90625 C 39.500001,50.334616 50.522119,39.3125 64.09375,39.3125 C 70.879568,39.312499 77.015044,42.077544 81.46875,46.53125 L 71.141571,56.858429 C 69.085701,58.914299 68.897846,60.164209 69.461963,61.473958 C 69.970531,62.654737 70.982695,64 73.671688,64 L 117.625,64 C 121.47089,64 124.08399,61.345621 124.08399,57.749869 L 124.01512,13.671851 C 124.01512,11.191096 123.11261,9.8286218 121.83365,9.3222363 C 120.51933,8.8018514 118.80044,9.1995576 116.72656,11.273436 L 106.53125,21.46875 C 95.671902,10.609399 80.656349,3.90625 64.09375,3.90625 z "
style="opacity:1;fill:url(#linearGradient3516);fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1" />
<path
style="opacity:0.79775277;fill:url(#radialGradient3757);fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1"
d="M 64.09375,3.90625 C 30.968558,3.9062499 4.0937499,30.781055 4.09375,63.90625 C 4.0937501,97.031442 30.96856,123.90625 64.09375,123.90625 C 94.382866,123.90625 119.43036,101.68999 123.5,72.5625 L 88.46875,67.53125 C 86.844066,79.492379 76.538676,88.5 64.09375,88.5 C 50.522122,88.499999 39.5,77.477881 39.5,63.90625 C 39.500001,50.334616 50.522119,39.3125 64.09375,39.3125 C 70.879568,39.312499 77.015044,42.077544 81.46875,46.53125 L 71.141571,56.858429 C 69.085701,58.914299 68.897846,60.164209 69.461963,61.473958 C 69.970531,62.654737 70.982695,64 73.671688,64 L 117.625,64 C 121.47089,64 124.08399,61.345621 124.08399,57.749869 L 124.01512,13.671851 C 124.01512,11.191096 123.11261,9.8286218 121.83365,9.3222363 C 120.51933,8.8018514 118.80044,9.1995576 116.72656,11.273436 L 106.53125,21.46875 C 95.671902,10.609399 80.656349,3.90625 64.09375,3.90625 z "
id="path3749"
sodipodi:nodetypes="cssccsssccsccccsccc" />
<path
sodipodi:nodetypes="cssccsssccsccccsccc"
id="path3656"
d="M 64.09375,3.90625 C 30.968558,3.9062499 4.0937499,30.781055 4.09375,63.90625 C 4.0937501,97.031442 30.96856,123.90625 64.09375,123.90625 C 94.382866,123.90625 119.43036,101.68999 123.5,72.5625 L 88.46875,67.53125 C 86.844066,79.492379 76.538676,88.5 64.09375,88.5 C 50.522122,88.499999 39.5,77.477881 39.5,63.90625 C 39.500001,50.334616 50.522119,39.3125 64.09375,39.3125 C 70.879568,39.312499 77.015044,42.077544 81.46875,46.53125 L 71.141571,56.858429 C 69.085701,58.914299 68.897846,60.164209 69.461963,61.473958 C 69.970531,62.654737 70.982695,64 73.671688,64 L 117.625,64 C 121.47089,64 124.08399,61.345621 124.08399,57.749869 L 124.01512,13.671851 C 124.01512,11.191096 123.11261,9.8286218 121.83365,9.3222363 C 120.51933,8.8018514 118.80044,9.1995576 116.72656,11.273436 L 106.53125,21.46875 C 95.671902,10.609399 80.656349,3.90625 64.09375,3.90625 z "
style="opacity:0.68913861;fill:url(#radialGradient3658);fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1" />
<path
style="opacity:1;fill:url(#radialGradient3654);fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1"
d="M 64.09375,3.90625 C 30.968558,3.9062499 4.0937499,30.781055 4.09375,63.90625 C 4.0937501,97.031442 30.96856,123.90625 64.09375,123.90625 C 94.382866,123.90625 119.43036,101.68999 123.5,72.5625 L 88.46875,67.53125 C 86.844066,79.492379 76.538676,88.5 64.09375,88.5 C 50.522122,88.499999 39.5,77.477881 39.5,63.90625 C 39.500001,50.334616 50.522119,39.3125 64.09375,39.3125 C 70.879568,39.312499 77.015044,42.077544 81.46875,46.53125 L 71.141571,56.858429 C 69.085701,58.914299 68.897846,60.164209 69.461963,61.473958 C 69.970531,62.654737 70.982695,64 73.671688,64 L 117.625,64 C 121.47089,64 124.08399,61.345621 124.08399,57.749869 L 124.01512,13.671851 C 124.01512,11.191096 123.11261,9.8286218 121.83365,9.3222363 C 120.51933,8.8018514 118.80044,9.1995576 116.72656,11.273436 L 106.53125,21.46875 C 95.671902,10.609399 80.656349,3.90625 64.09375,3.90625 z "
id="path3643"
sodipodi:nodetypes="cssccsssccsccccsccc" />
<path
sodipodi:nodetypes="cssccsssccsccccsccc"
id="path3742"
d="M 64.09375,3.90625 C 30.968558,3.9062499 4.0937499,30.781055 4.09375,63.90625 C 4.0937501,97.031442 30.96856,123.90625 64.09375,123.90625 C 94.382866,123.90625 119.43036,101.68999 123.5,72.5625 L 88.46875,67.53125 C 86.844066,79.492379 76.538676,88.5 64.09375,88.5 C 50.522122,88.499999 39.5,77.477881 39.5,63.90625 C 39.500001,50.334616 50.522119,39.3125 64.09375,39.3125 C 70.879568,39.312499 77.015044,42.077544 81.46875,46.53125 L 71.141571,56.858429 C 69.085701,58.914299 68.897846,60.164209 69.461963,61.473958 C 69.970531,62.654737 70.982695,64 73.671688,64 L 117.625,64 C 121.47089,64 124.08399,61.345621 124.08399,57.749869 L 124.01512,13.671851 C 124.01512,11.191096 123.11261,9.8286218 121.83365,9.3222363 C 120.51933,8.8018514 118.80044,9.1995576 116.72656,11.273436 L 106.53125,21.46875 C 95.671902,10.609399 80.656349,3.90625 64.09375,3.90625 z "
style="opacity:0.79775277;fill:url(#radialGradient3744);fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1" />
<path
style="opacity:0.74531836;fill:url(#radialGradient3767);fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dashoffset:1.08779998;stroke-opacity:1;filter:url(#filter3630)"
d="M 64.09375,4.20625 C 30.968558,4.2062499 4.0937499,30.781055 4.09375,63.90625 C 4.0937501,97.031442 30.96856,123.90625 64.09375,123.90625 C 94.382866,123.90625 119.43036,101.68999 123.5,72.5625 L 88.46875,67.53125 C 86.844066,79.492379 76.538676,88.5 64.09375,88.5 C 50.522122,88.499999 39.5,77.477881 39.5,63.90625 C 39.500001,50.334616 50.522119,39.3125 64.09375,39.3125 C 70.879568,39.312499 77.015044,42.077544 81.46875,46.53125 L 71.141571,56.858429 C 69.085701,58.914299 68.897846,60.164209 69.461963,61.473958 C 69.970531,62.654737 70.982695,64 73.671688,64 L 117.625,64 C 121.47089,64 124.08399,61.345621 124.08399,57.749869 L 124.01512,13.671851 C 124.01512,11.191096 123.11261,9.8286218 121.83365,9.3222363 C 120.51933,8.8018514 118.80044,9.1995576 116.72656,11.273436 L 106.53125,21.46875 C 95.671902,10.609399 80.656349,4.20625 64.09375,4.20625 z "
id="path3759"
sodipodi:nodetypes="cssccsssccsccccsccc" />
<path
sodipodi:nodetypes="cccczc"
id="path3660"
d="M 117.6875,10.75 L 118.625,15.125 L 119.875,16 L 123.875,13.375 C 124.12188,11.651249 123.52383,10.027571 121.9375,9.3749999 C 120.35116,8.7224285 118.77622,9.5017032 117.6875,10.75 z "
style="opacity:0.82022472;fill:url(#linearGradient3676);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3738)" />
<path
transform="matrix(0,1,1,0,60.363582,-60.363586)"
style="opacity:0.82022472;fill:url(#linearGradient3771);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3738)"
d="M 117.6875,10.75 L 119.875,13.875 L 120.375,13.75 L 123.875,13.375 C 124.12188,11.651249 123.52383,10.027571 121.9375,9.3749999 C 120.35116,8.7224285 118.77622,9.5017032 117.6875,10.75 z "
id="path3769"
sodipodi:nodetypes="cccczc" />
<path
id="path3494"
d="M 64.09375,7.90625 C 33.132052,7.9062499 8.0937499,32.944549 8.09375,63.90625 C 8.0937501,94.867948 33.132054,119.90625 64.09375,119.90625 C 91.026646,119.90625 113.21548,101.0995 118.625,75.90625 L 91.5,72.03125 C 88.061436,83.928551 77.059621,92.5 64.09375,92.5 C 48.356404,92.499999 35.5,79.643599 35.5,63.90625 C 35.500001,48.168899 48.356402,35.3125 64.09375,35.3125 C 71.966166,35.312499 79.145304,38.520304 84.3125,43.6875 C 85.071964,44.438909 85.499997,45.462886 85.5,46.53125 C 85.5,47.599614 85.071964,48.623591 84.3125,49.375 L 73.6875,60 L 117.625,60 C 119.63039,60 120.09375,59.407836 120.09375,57.75 L 120,13.65625 L 109.375,24.3125 C 108.62359,25.071964 107.59961,25.5 106.53125,25.5 C 105.46289,25.5 104.43891,25.071964 103.6875,24.3125 C 93.549835,14.174833 79.577106,7.90625 64.09375,7.90625 z "
style="opacity:1;fill:url(#radialGradient3524);fill-opacity:1;stroke:none;stroke-width:4;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:1.08779998;stroke-opacity:1" />
<path
style="opacity:1;fill:url(#radialGradient3535);fill-opacity:1;stroke:none;stroke-width:4;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:1.08779998;stroke-opacity:1"
d="M 64.09375,7.90625 C 33.132052,7.9062499 8.0937499,32.944549 8.09375,63.90625 C 8.0937501,94.867948 33.132054,119.90625 64.09375,119.90625 C 91.026646,119.90625 113.21548,101.0995 118.625,75.90625 L 91.5,72.03125 C 88.061436,83.928551 77.059621,92.5 64.09375,92.5 C 48.356404,92.499999 35.5,79.643599 35.5,63.90625 C 35.500001,48.168899 48.356402,35.3125 64.09375,35.3125 C 71.966166,35.312499 79.145304,38.520304 84.3125,43.6875 C 85.071964,44.438909 85.499997,45.462886 85.5,46.53125 C 85.5,47.599614 85.071964,48.623591 84.3125,49.375 L 73.6875,60 L 117.625,60 C 119.63039,60 120.09375,59.407836 120.09375,57.75 L 120,13.65625 L 109.375,24.3125 C 108.62359,25.071964 107.59961,25.5 106.53125,25.5 C 105.46289,25.5 104.43891,25.071964 103.6875,24.3125 C 93.549835,14.174833 79.577106,7.90625 64.09375,7.90625 z "
id="path3526" />
<path
sodipodi:nodetypes="csccssccccccscc"
id="path3537"
d="M 64.09375,7.90625 C 33.132052,7.9062499 8.0937499,32.944549 8.09375,63.90625 C 8.09375,64.474122 8.1082724,65.029981 8.125,65.59375 C 14.11447,66.271402 20.266218,66.74388 26.53125,67 C 26.260548,56.540958 30.202859,46.025084 38.34375,38.21875 C 53.683067,23.509813 78.072313,24.004431 92.78125,39.34375 C 95.545099,42.226046 97.537852,45.032117 99.34375,48.59375 L 78.84375,59 L 98,59 C 105.9282,56.973373 113.18621,55.563033 120.09375,52.8125 L 120,13.65625 L 109.375,24.3125 C 108.62359,25.071964 107.59961,25.5 106.53125,25.5 C 105.46289,25.5 104.43891,25.071964 103.6875,24.3125 C 93.549835,14.174833 79.577106,7.90625 64.09375,7.90625 z "
style="opacity:1;fill:url(#linearGradient3548);fill-opacity:1;stroke:none;stroke-width:8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:1.08779998;stroke-opacity:1" />
<path
sodipodi:nodetypes="cscscscccccccccccssssssccscscc"
id="path3553"
d="M 64.099866,7.9087646 C 33.138176,7.9087644 8.0998661,32.947063 8.0998661,63.908764 C 8.0998761,64.087476 8.0973761,64.263059 8.0998661,64.440014 C 8.3531061,33.696509 33.295846,8.9087645 64.099866,8.9087646 C 79.583236,8.9087645 93.555946,15.177347 103.69361,25.315014 C 104.44503,26.074479 105.469,26.502514 106.53736,26.502514 C 107.60573,26.502515 108.6297,26.074478 109.38111,25.315014 L 119.50611,15.158764 L 119.99986,52.708764 C 113.09232,55.459294 105.43431,56.569624 97.506116,58.596264 L 78.849866,59.002514 L 98.006116,59.002514 C 105.93431,56.975884 113.19232,55.565544 120.09986,52.815014 L 120.00611,14.658764 L 120.00611,13.658764 L 119.50611,14.158764 L 109.38111,24.315014 C 108.62971,25.074479 107.60572,25.502514 106.53736,25.502514 C 105.46901,25.502515 104.44502,25.074478 103.69361,24.315014 C 103.68314,24.304548 103.67283,24.294222 103.66236,24.283764 C 103.60999,24.231473 103.55869,24.179598 103.50611,24.127514 C 102.93231,23.559643 102.35524,23.012364 101.75611,22.471264 C 101.67459,22.397145 101.58807,22.326157 101.50611,22.252514 C 91.590066,13.342335 78.496526,7.9087646 64.099866,7.9087646 z M 63.443616,27.127514 C 54.205446,27.378034 45.040176,30.920194 37.849866,37.815014 C 30.217786,45.133448 26.722316,55.187931 26.537366,65.033764 C 26.777246,55.231884 30.717786,45.539698 38.349866,38.221264 C 51.665996,25.452364 71.803196,24.123207 86.506116,34.033764 C 79.627056,29.22869 71.518656,26.908534 63.443616,27.127514 z "
style="opacity:1;fill:url(#linearGradient3581);fill-opacity:1;stroke:none;stroke-width:8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:1.08779998;stroke-opacity:1" />
<path
id="path3603"
d="M 63.59375,7.90625 C 32.63205,7.9062499 7.59375,32.944549 7.59375,63.90625 C 7.59375,94.867948 32.63205,119.90625 63.59375,119.90625 C 90.52665,119.90625 112.71548,101.0995 118.125,75.90625 L 91,72.03125 C 87.56144,83.928551 76.55962,92.5 63.59375,92.5 C 47.8564,92.499999 35,79.643599 35,63.90625 C 35,48.168899 47.8564,35.3125 63.59375,35.3125 C 71.46617,35.312499 78.6453,38.520304 83.8125,43.6875 C 84.57196,44.438909 85,45.462886 85,46.53125 C 85,47.599614 84.57196,48.623591 83.8125,49.375 L 73.1875,60 L 117.125,60 C 119.13039,60 119.59375,59.407836 119.59375,57.75 L 119.5,13.65625 L 108.875,24.3125 C 108.12359,25.071964 107.09961,25.5 106.03125,25.5 C 104.96289,25.5 103.93891,25.071964 103.1875,24.3125 C 93.04984,14.174833 79.07711,7.90625 63.59375,7.90625 z "
style="opacity:1;fill:none;fill-opacity:1;stroke:url(#linearGradient3567);stroke-width:1;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:1.08779998;stroke-opacity:1;filter:url(#filter3639)" />
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 35 KiB

View File

@ -12,13 +12,24 @@ defaults.
# The algorithm used to assign a new book in an existing series a series number.
# New series numbers assigned using this tweak are always integer values, except
# if a constant non-integer is specified.
# Possible values are:
# next - Next available number
# next - First available integer larger than the largest existing number
# first_free - First available integer larger than 0
# next_free - First available integer larger than the smallest existing number
# last_free - First available integer smaller than the largest existing number
# Return largest existing + 1 if no free number is found
# const - Assign the number 1 always
# a number - Assign that number always. The number is not in quotes. Note that
# 0.0 can be used here.
# Examples:
# series_index_auto_increment = 'next'
# series_index_auto_increment = 'next_free'
# series_index_auto_increment = 16.5
series_index_auto_increment = 'next'
# The algorithm used to copy author to author_sort
# Possible values are:
# invert: use "fn ln" -> "ln, fn" (the original algorithm)
@ -30,6 +41,20 @@ series_index_auto_increment = 'next'
# selecting 'manage authors', and pressing 'Recalculate all author sort values'.
author_sort_copy_method = 'invert'
# Set which author field to display in the tags pane (the list of authors,
# series, publishers etc on the left hand side). The choices are author and
# author_sort. This tweak affects only what is displayed under the authors
# category in the tags pane and content server. Please note that if you set this
# to author_sort, it is very possible to see duplicate names in the list because
# although it is guaranteed that author names are unique, there is no such
# guarantee for author_sort values. Showing duplicates won't break anything, but
# it could lead to some confusion. When using 'author_sort', the tooltip will
# show the author's name.
# Examples:
# categories_use_field_for_author_name = 'author'
# categories_use_field_for_author_name = 'author_sort'
categories_use_field_for_author_name = 'author'
# Set whether boolean custom columns are two- or three-valued.
# Two-values for true booleans
@ -110,32 +135,53 @@ auto_connect_to_folder = ''
# metadata management is set to automatic. Collections on Sonys are named
# depending upon whether the field is standard or custom. A collection derived
# from a standard field is named for the value in that field. For example, if
# the standard 'series' column contains the name 'Darkover', then the series
# will be named 'Darkover'. A collection derived from a custom field will have
# the name of the field added to the value. For example, if a custom series
# the standard 'series' column contains the value 'Darkover', then the
# collection name is 'Darkover'. A collection derived from a custom field will
# have the name of the field added to the value. For example, if a custom series
# column named 'My Series' contains the name 'Darkover', then the collection
# will be named 'Darkover (My Series)'. If two books have fields that generate
# the same collection name, then both books will be in that collection. This
# tweak lets you specify for a standard or custom field the value to be put
# inside the parentheses. You can use it to add a parenthetical description to a
# will by default be named 'Darkover (My Series)'. For purposes of this
# documentation, 'Darkover' is called the value and 'My Series' is called the
# category. If two books have fields that generate the same collection name,
# then both books will be in that collection.
# This set of tweaks lets you specify for a standard or custom field how
# the collections are to be named. You can use it to add a description to a
# standard field, for example 'Foo (Tag)' instead of the 'Foo'. You can also use
# it to force multiple fields to end up in the same collection. For example, you
# could force the values in 'series', '#my_series_1', and '#my_series_2' to
# appear in collections named 'some_value (Series)', thereby merging all of the
# fields into one set of collections. The syntax of this tweak is
# {'field_lookup_name':'name_to_use', 'lookup_name':'name', ...}
# Example 1: I want three series columns to be merged into one set of
# collections. If the column lookup names are 'series', '#series_1' and
# '#series_2', and if I want nothing in the parenthesis, then the value to use
# in the tweak value would be:
# sony_collection_renaming_rules={'series':'', '#series_1':'', '#series_2':''}
# Example 2: I want the word '(Series)' to appear on collections made from
# series, and the word '(Tag)' to appear on collections made from tags. Use:
# sony_collection_renaming_rules={'series':'Series', 'tags':'Tag'}
# Example 3: I want 'series' and '#myseries' to be merged, and for the
# collection name to have '(Series)' appended. The renaming rule is:
# sony_collection_renaming_rules={'series':'Series', '#myseries':'Series'}
# fields into one set of collections.
# There are two related tweaks. The first determines the category name to use
# for a metadata field. The second is a template, used to determine how the
# value and category are combined to create the collection name.
# The syntax of the first tweak, sony_collection_renaming_rules, is:
# {'field_lookup_name':'category_name_to_use', 'lookup_name':'name', ...}
# The second tweak, sony_collection_name_template, is a template. It uses the
# same template language as plugboards and save templates. This tweak controls
# how the value and category are combined together to make the collection name.
# The only two fields available are {category} and {value}. The {value} field is
# never empty. The {category} field can be empty. The default is to put the
# value first, then the category enclosed in parentheses, if it isn't empty:
# '{value} {category:|(|)}'
# Examples: The first three examples assume that the second tweak
# has not been changed.
# 1: I want three series columns to be merged into one set of collections. The
# column lookup names are 'series', '#series_1' and '#series_2'. I want nothing
# in the parenthesis. The value to use in the tweak value would be:
# sony_collection_renaming_rules={'series':'', '#series_1':'', '#series_2':''}
# 2: I want the word '(Series)' to appear on collections made from series, and
# the word '(Tag)' to appear on collections made from tags. Use:
# sony_collection_renaming_rules={'series':'Series', 'tags':'Tag'}
# 3: I want 'series' and '#myseries' to be merged, and for the collection name
# to have '(Series)' appended. The renaming rule is:
# sony_collection_renaming_rules={'series':'Series', '#myseries':'Series'}
# 4: Same as example 2, but instead of having the category name in parentheses
# and appended to the value, I want it prepended and separated by a colon, such
# as in Series: Darkover. I must change the template used to format the category name.
# The resulting two tweaks are:
# sony_collection_renaming_rules={'series':'Series', 'tags':'Tag'}
# sony_collection_name_template='{category:||: }{value}'
sony_collection_renaming_rules={}
sony_collection_name_template='{value}{category:| (|)}'
# Specify how sony collections are sorted. This tweak is only applicable if
@ -219,8 +265,10 @@ generate_cover_title_font = None
generate_cover_foot_font = None
# Behavior of doubleclick on the books list. Choices:
# open_viewer, do_nothing, edit_cell. Default: open_viewer.
# Behavior of doubleclick on the books list. Choices: open_viewer, do_nothing,
# edit_cell, edit_metadata. Selecting edit_metadata has the side effect of
# disabling editing a field using a single click.
# Default: open_viewer.
# Example: doubleclick_on_library_view = 'do_nothing'
doubleclick_on_library_view = 'open_viewer'
@ -235,3 +283,9 @@ doubleclick_on_library_view = 'open_viewer'
# Example: locale_for_sorting = 'fr' -- sort using French rules.
# Example: locale_for_sorting = 'nb' -- sort using Norwegian rules.
locale_for_sorting = ''
# Set whether to use one or two columns for custom metadata when editing
# metadata one book at a time. If True, then the fields are laid out using two
# columns. If False, one column is used.
metadata_single_use_2_cols_for_custom_fields = True

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

View File

Before

Width:  |  Height:  |  Size: 4.7 KiB

After

Width:  |  Height:  |  Size: 4.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1021 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 965 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 13 KiB

After

Width:  |  Height:  |  Size: 11 KiB

View File

@ -0,0 +1,23 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
'''
ajiajin.com/blog
'''
from calibre.web.feeds.news import BasicNewsRecipe
class AjiajinBlog(BasicNewsRecipe):
title = u'Ajiajin blog'
__author__ = 'Hiroshi Miura'
oldest_article = 5
publication_type = 'blog'
max_articles_per_feed = 100
description = 'The next generation internet trends in Japan and Asia'
publisher = ''
category = 'internet, asia, japan'
language = 'en'
encoding = 'utf-8'
feeds = [(u'blog', u'http://feeds.feedburner.com/Asiajin')]

View File

@ -11,6 +11,7 @@ class APOD(BasicNewsRecipe):
remove_javascript = True
recursions = 0
oldest_article = 14
remove_attributes = ['onmouseover', 'onmouseout']
feeds = [
(u'Astronomy Picture of the Day', u'http://apod.nasa.gov/apod.rss')

View File

@ -0,0 +1,69 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.businessinsider.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
class Business_insider(BasicNewsRecipe):
    """Fetches the Business Insider feedburner streams and cleans the
    embedded article content for e-book conversion."""
    title                 = 'Business Insider'
    __author__            = 'Darko Miletic'
    # NOTE(review): description looks copy-pasted from a Spanish-language
    # recipe (it describes Argentine news) -- confirm the intended text.
    description           = 'Noticias de Argentina y el resto del mundo'
    publisher             = 'Business Insider, Inc.'
    category              = 'news, politics, finances, world'
    oldest_article        = 2
    max_articles_per_feed = 200
    no_stylesheets        = True
    encoding              = 'utf8'
    use_embedded_content  = True
    language              = 'en'
    remove_empty_feeds    = True
    publication_type      = 'newsportal'
    masthead_url          = 'http://static.businessinsider.com/assets/images/logos/tbi_print.jpg'
    extra_css             = """
                               body{font-family: Arial,Helvetica,sans-serif }
                               img{margin-bottom: 0.4em; display:block}
                            """

    conversion_options = {
          'comment'   : description
        , 'tags'      : category
        , 'publisher' : publisher
        , 'language'  : language
    }

    remove_tags = [
                 dict(name=['meta','link'])
                ,dict(attrs={'class':'feedflare'})
               ]
    remove_attributes = ['lang','border']

    feeds = [
              (u'Latest'      , u'http://feeds2.feedburner.com/businessinsider'                          )
             ,(u'Markets'     , u'http://feeds.feedburner.com/TheMoneyGame'                              )
             ,(u'Wall Street' , u'http://feeds.feedburner.com/clusterstock'                              )
             ,(u'Tech'        , u'http://feeds.feedburner.com/typepad/alleyinsider/silicon_alley_insider')
             ,(u'The Wire'    , u'http://feeds.feedburner.com/businessinsider/thewire'                   )
             ,(u'War Room'    , u'http://feeds.feedburner.com/businessinsider/warroom'                   )
             ,(u'Sports'      , u'http://feeds.feedburner.com/businessinsider/sportspage'                )
             ,(u'Tools'       , u'http://feeds.feedburner.com/businessinsider/tools'                     )
             ,(u'Travel'      , u'http://feeds.feedburner.com/businessinsider/travel'                    )
            ]

    def preprocess_html(self, soup):
        """Clean article markup: strip inline styles, remove feed-ad links,
        flatten ordinary anchors to their text, and ensure every image
        carries an alt attribute."""
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            # BUG FIX: anchors without an href (e.g. named anchors) raised
            # KeyError on item['href']; check for the attribute first.
            if item.has_key('href') and item['href'].startswith('http://feedads'):
                item.extract()
            elif item.string is not None:
                tstr = item.string
                item.replaceWith(tstr)
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup

View File

@ -1,64 +1,102 @@
__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2008 Kovid Goyal kovid@kovidgoyal.net, 2010 Darko Miletic <darko.miletic at gmail.com>'
'''
http://www.businessweek.com/magazine/news/articles/business_news.htm
www.businessweek.com
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class BWmagazine(BasicNewsRecipe):
title = 'BusinessWeek Magazine'
__author__ = 'Darko Miletic'
description = 'Stay up to date with BusinessWeek magazine articles. Read news on international business, personal finances & the economy in the BusinessWeek online magazine.'
class BusinessWeek(BasicNewsRecipe):
title = 'Business Week'
__author__ = 'Kovid Goyal and Darko Miletic'
description = 'Read the latest international business news & stock market news. Get updated company profiles, financial advice, global economy and technology news.'
publisher = 'Bloomberg L.P.'
category = 'news, International Business News, current news in international business,international business articles, personal business, business week magazine, business week magazine articles, business week magazine online, business week online magazine'
oldest_article = 10
max_articles_per_feed = 100
category = 'Business, business news, stock market, stock market news, financial advice, company profiles, financial advice, global economy, technology news'
oldest_article = 7
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'utf-8'
encoding = 'utf8'
use_embedded_content = False
language = 'en'
INDEX = 'http://www.businessweek.com/magazine/news/articles/business_news.htm'
remove_empty_feeds = True
publication_type = 'magazine'
cover_url = 'http://images.businessweek.com/mz/covers/current_120x160.jpg'
masthead_url = 'http://assets.businessweek.com/images/bw-logo.png'
extra_css = """
body{font-family: Helvetica,Arial,sans-serif }
img{margin-bottom: 0.4em; display:block}
.tagline{color: gray; font-style: italic}
.photoCredit{font-size: small; color: gray}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
remove_tags = [
dict(attrs={'class':'inStory'})
,dict(name=['meta','link','iframe','base','embed','object','table','th','tr','td'])
,dict(attrs={'id':['inset','videoDisplay']})
]
keep_only_tags = [dict(name='div', attrs={'id':['story-body','storyBody']})]
remove_attributes = ['lang']
match_regexps = [r'http://www.businessweek.com/.*_page_[1-9].*']
def parse_index(self):
articles = []
soup = self.index_to_soup(self.INDEX)
ditem = soup.find('div',attrs={'id':'column2'})
if ditem:
for item in ditem.findAll('h3'):
title_prefix = ''
description = ''
feed_link = item.find('a')
if feed_link and feed_link.has_key('href'):
url = 'http://www.businessweek.com/magazine/' + feed_link['href'].partition('../../')[2]
title = title_prefix + self.tag_to_string(feed_link)
date = strftime(self.timefmt)
articles.append({
'title' :title
,'date' :date
,'url' :url
,'description':description
})
return [(soup.head.title.string, articles)]
keep_only_tags = dict(name='div', attrs={'id':'storyBody'})
feeds = [
(u'Top Stories', u'http://www.businessweek.com/topStories/rss/topStories.rss'),
(u'Top News' , u'http://www.businessweek.com/rss/bwdaily.rss' ),
(u'Asia', u'http://www.businessweek.com/rss/asia.rss'),
(u'Autos', u'http://www.businessweek.com/rss/autos/index.rss'),
(u'Classic Cars', u'http://rss.businessweek.com/bw_rss/classiccars'),
(u'Hybrids', u'http://rss.businessweek.com/bw_rss/hybrids'),
(u'Europe', u'http://www.businessweek.com/rss/europe.rss'),
(u'Auto Reviews', u'http://rss.businessweek.com/bw_rss/autoreviews'),
(u'Innovation & Design', u'http://www.businessweek.com/rss/innovate.rss'),
(u'Architecture', u'http://www.businessweek.com/rss/architecture.rss'),
(u'Brand Equity', u'http://www.businessweek.com/rss/brandequity.rss'),
(u'Auto Design', u'http://www.businessweek.com/rss/carbuff.rss'),
(u'Game Room', u'http://rss.businessweek.com/bw_rss/gameroom'),
(u'Technology', u'http://www.businessweek.com/rss/technology.rss'),
(u'Investing', u'http://rss.businessweek.com/bw_rss/investor'),
(u'Small Business', u'http://www.businessweek.com/rss/smallbiz.rss'),
(u'Careers', u'http://rss.businessweek.com/bw_rss/careers'),
(u'B-Schools', u'http://www.businessweek.com/rss/bschools.rss'),
(u'Magazine Selections', u'http://www.businessweek.com/rss/magazine.rss'),
(u'CEO Guide to Tech', u'http://www.businessweek.com/rss/ceo_guide_tech.rss'),
]
def get_article_url(self, article):
    """Return the canonical URL for *article*, or None to skip it.

    Entries without a GUID, and podcast/survey/image/feedroom/TOC entries,
    are skipped; any trailing query string is stripped from the URL.
    """
    url = article.get('guid', None)
    if not url:
        # Some feed entries carry no GUID; the original code crashed with
        # a TypeError on the substring tests below in that case.
        return None
    for marker in ('podcasts', 'surveys', 'images', 'feedroom',
                   '/magazine/toc/'):
        if marker in url:
            return None
    # Drop the query string, if any; rpartition yields ('', '', url)
    # when there is no '?', in which case the URL is returned unchanged.
    rurl, sep, rest = url.rpartition('?')
    if rurl:
        return rurl
    return rest
def print_version(self, url):
    """Map an article URL to its printer-friendly equivalent.

    News and blog articles have no print version and are returned as-is.
    Magazine URLs get '/print/' inserted after the domain; other site URLs
    are prefixed with '/print/' and '/investing/' is normalised to the
    '/investor/' print path.
    """
    # NOTE: the original body was a bad merge -- everything after the
    # first return was unreachable, and the news/blog guard compared
    # against the malformed (always-truthy) literal '/blog/ in url'.
    if '/news/' in url or '/blog/' in url:
        return url
    rurl = url.rpartition('?')[0]
    if rurl == '':
        rurl = url
    if '.com/magazine/' in rurl:
        return rurl.replace('.com/magazine/', '.com/print/magazine/')
    rurl = rurl.replace('http://www.businessweek.com/',
                        'http://www.businessweek.com/print/')
    return rurl.replace('/investing/', '/investor/')
def preprocess_html(self, soup):
    """Strip inline styles and flatten text-only anchors to plain strings."""
    for styled in soup.findAll(style=True):
        del styled['style']
    for anchor in soup.findAll('a'):
        text = anchor.string
        if text is not None:
            anchor.replaceWith(text)
    return soup

View File

@ -0,0 +1,37 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
'''
http://ameblo.jp/
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class SakuraBlog(BasicNewsRecipe):
    """Embedded-content recipe for the 'chou chou' dog blog on Ameba."""

    title = u'chou chou blog'
    __author__ = 'Hiroshi Miura'
    oldest_article = 4
    publication_type = 'blog'
    max_articles_per_feed = 20
    description = 'Japanese popular dog blog'
    publisher = ''
    category = 'dog, pet, japan'
    language = 'ja'
    encoding = 'utf-8'
    use_embedded_content = True

    feeds = [(u'blog', u'http://feedblog.ameba.jp/rss/ameblo/chouchou1218/rss20.xml')]

    def parse_feeds(self):
        """Fetch the feeds, then drop ad entries served through rssad.jp."""
        feeds = BasicNewsRecipe.parse_feeds(self)
        for feed in feeds:
            ads = [art for art in feed.articles
                   if re.search(r'rssad.jp', art.url)]
            for ad in ads:
                feed.articles.remove(ad)
        return feeds

View File

@ -0,0 +1,67 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Derek Liang <Derek.liang.ca @@@at@@@ gmail.com>'
'''
cnd.org
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class TheCND(BasicNewsRecipe):
    """Recipe for CND (cnd.org), grouping front-page articles by date."""

    title = 'CND'
    __author__ = 'Derek Liang'
    description = ''
    INDEX = 'http://cnd.org'
    language = 'zh'

    conversion_options = {'linearize_tables': True}

    remove_tags_before = dict(name='div', id='articleHead')
    remove_tags_after = dict(id='copyright')
    remove_tags = [
        dict(name='table', attrs={'align': 'right'}),
        dict(name='img', attrs={'src': 'http://my.cnd.org/images/logo.gif'}),
        dict(name='hr', attrs={}),
        dict(name='small', attrs={}),
    ]
    no_stylesheets = True

    preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]

    def print_version(self, url):
        # News articles and section articles live behind different print
        # endpoints; rewrite everything up to the '=' accordingly.
        if url.find('news/article.php') >= 0:
            return re.sub("^[^=]*", "http://my.cnd.org/modules/news/print.php?storyid", url)
        return re.sub("^[^=]*", "http://my.cnd.org/modules/wfsection/print.php?articleid", url)

    def parse_index(self):
        """Collect article links from the front page, keyed by their date
        text, and publish only the most recent date's batch."""
        soup = self.index_to_soup(self.INDEX)
        by_date = {}
        for anchor in soup.findAll('a', attrs={'target': '_cnd'}):
            url = anchor['href']
            if url.find('article.php') < 0:
                continue
            if url.startswith('/'):
                url = 'http://cnd.org' + url
            title = self.tag_to_string(anchor)
            self.log('\tFound article: ', title, 'at', url)
            # The date label follows the link as a bare text node.
            date = anchor.nextSibling
            if (date is not None) and len(date) > 2:
                by_date.setdefault(date, []).append(
                    {'title': title, 'url': url, 'description': '', 'date': ''})
                self.log('\t\tAppend to : ', date)
        self.log('log articles', by_date)
        latest = sorted(by_date).pop()
        self.title = 'CND ' + latest
        return [(self.title, by_date[latest])]

    def populate_article_metadata(self, article, soup, first):
        header = soup.find('h3')
        self.log('header: ' + self.tag_to_string(header))

View File

@ -3,15 +3,16 @@ __copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
'''
http://www.dilbert.com
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
import re
class DosisDiarias(BasicNewsRecipe):
class DilbertBig(BasicNewsRecipe):
title = 'Dilbert'
__author__ = 'Darko Miletic'
__author__ = 'Darko Miletic and Starson17'
description = 'Dilbert'
oldest_article = 5
reverse_article_order = True
oldest_article = 15
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = True
@ -29,20 +30,23 @@ class DosisDiarias(BasicNewsRecipe):
feeds = [(u'Dilbert', u'http://feeds.dilbert.com/DilbertDailyStrip' )]
preprocess_regexps = [
(re.compile('strip\..*\.gif', re.DOTALL|re.IGNORECASE),
lambda match: 'strip.zoom.gif')
]
def get_article_url(self, article):
    # Prefer the original (pre-Feedburner) article link; None skips the entry.
    return article.get('feedburner_origlink', None)
preprocess_regexps = [
(re.compile('strip\..*\.gif', re.DOTALL|re.IGNORECASE), lambda match: 'strip.zoom.gif')
]
def preprocess_html(self, soup):
    """Remove Feedburner advertisement links from the strip page."""
    # Restricting to href=True avoids a KeyError on anchors that carry
    # no href attribute (the original indexed tag['href'] unconditionally).
    for tag in soup.findAll('a', href=True):
        if tag['href'].find('http://feedads') >= 0:
            tag.extract()
    return soup
extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
img {max-width:100%; min-width:100%;}
p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
'''

View File

@ -0,0 +1,42 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
globaleconomicanalysis.blogspot.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
class GlobalEconomicAnalysis(BasicNewsRecipe):
    # Recipe for Mike Shedlock's economics blog; article bodies arrive
    # fully embedded in the Feedburner feed (use_embedded_content).
    title = "Mish's Global Economic Trend Analysis"
    __author__ = 'Darko Miletic'
    description = 'Thoughts on the global economy, housing, gold, silver, interest rates, oil, energy, China, commodities, the dollar, Euro, Renminbi, Yen, inflation, deflation, stagflation, precious metals, emerging markets, and policy decisions that affect the global markets.'
    publisher = 'Mike Shedlock'
    category = 'news, politics, economy, banking'
    oldest_article = 7
    max_articles_per_feed = 200
    no_stylesheets = True
    encoding = 'utf8'
    use_embedded_content = True
    language = 'en'
    remove_empty_feeds = True
    publication_type = 'blog'
    # NOTE(review): this masthead is the pagina12.com.ar logo -- it looks
    # like a copy/paste leftover from another recipe; confirm the intended
    # masthead for this blog.
    masthead_url = 'http://www.pagina12.com.ar/commons/imgs/logo-home.gif'
    extra_css = """
    body{font-family: Arial,Helvetica,sans-serif }
    img{margin-bottom: 0.4em; display:block}
    """

    conversion_options = {
        'comment' : description
        , 'tags' : category
        , 'publisher' : publisher
        , 'language' : language
    }

    # Strip feed plumbing and the Blogger footer from embedded content.
    remove_tags = [
        dict(name=['meta','link','iframe','object','embed'])
        ,dict(attrs={'class':'blogger-post-footer'})
    ]
    remove_attributes=['border']

    feeds = [(u'Articles', u'http://feeds2.feedburner.com/MishsGlobalEconomicTrendAnalysis')]

View File

@ -0,0 +1,109 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '04 December 2010, desUBIKado'
__author__ = 'desUBIKado'
__description__ = 'Daily newspaper from Aragon'
__version__ = 'v0.05'
__date__ = '07, December 2010'
'''
elperiodicodearagon.com
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class elperiodicodearagon(BasicNewsRecipe):
    """Daily newspaper 'El Periodico de Aragon' (elperiodicodearagon.com)."""

    title = u'El Periodico de Aragon'
    __author__ = u'desUBIKado'
    description = u'Noticias desde Aragon'
    publisher = u'elperiodicodearagon.com'
    category = u'news, politics, Spain, Aragon'
    oldest_article = 2
    delay = 0
    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = False
    language = 'es'
    encoding = 'utf8'
    remove_empty_feeds = True
    remove_javascript = True

    conversion_options = {
        'comments' : description
        ,'tags' : category
        ,'language' : language
        ,'publisher' : publisher
    }

    feeds = [(u'Arag\xf3n', u'http://elperiodicodearagon.com/RSS/2.xml'),
             (u'Internacional', u'http://elperiodicodearagon.com/RSS/4.xml'),
             (u'Espa\xf1a', u'http://elperiodicodearagon.com/RSS/3.xml'),
             (u'Econom\xeda', u'http://elperiodicodearagon.com/RSS/5.xml'),
             (u'Deportes', u'http://elperiodicodearagon.com/RSS/7.xml'),
             (u'Real Zaragoza', u'http://elperiodicodearagon.com/RSS/10.xml'),
             (u'Opini\xf3n', u'http://elperiodicodearagon.com/RSS/103.xml'),
             (u'Escenarios', u'http://elperiodicodearagon.com/RSS/105.xml'),
             (u'Sociedad', u'http://elperiodicodearagon.com/RSS/104.xml'),
             (u'Gente', u'http://elperiodicodearagon.com/RSS/330.xml')]

    extra_css = '''
                h3{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:xx-large;}
                h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
                dd{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
                '''

    remove_attributes = ['height','width']

    keep_only_tags = [dict(name='div', attrs={'id':'contenidos'})]

    # Strip all the page clutter (toolbars, related links, comment forms...)
    remove_tags = [dict(name='ul', attrs={'class':'herramientasDeNoticia'}),
                   dict(name='span', attrs={'class':'MasInformacion '}),
                   dict(name='span', attrs={'class':'MasInformacion'}),
                   dict(name='div', attrs={'class':'Middle'}),
                   dict(name='div', attrs={'class':'MenuCabeceraRZaragoza'}),
                   dict(name='div', attrs={'id':'MenuCabeceraRZaragoza'}),
                   dict(name='div', attrs={'class':'MenuEquipo'}),
                   dict(name='div', attrs={'class':'TemasRelacionados'}),
                   dict(name='div', attrs={'class':'GaleriaEnNoticia'}),
                   dict(name='div', attrs={'class':'Recorte'}),
                   dict(name='div', attrs={'id':'NoticiasenRecursos'}),
                   dict(name='div', attrs={'id':'NoticiaEnPapel'}),
                   dict(name='p', attrs={'class':'RecorteEnNoticias'}),
                   dict(name='div', attrs={'id':'Comparte'}),
                   dict(name='div', attrs={'id':'CajaComparte'}),
                   dict(name='a', attrs={'class':'EscribirComentario'}),
                   dict(name='a', attrs={'class':'AvisoComentario'}),
                   dict(name='div', attrs={'class':'CajaAvisoComentario'}),
                   dict(name='div', attrs={'class':'navegaNoticias'}),
                   dict(name='div', attrs={'id':'PaginadorDiCom'}),
                   dict(name='div', attrs={'id':'CajaAccesoCuentaUsuario'}),
                   dict(name='div', attrs={'id':'CintilloComentario'}),
                   dict(name='div', attrs={'id':'EscribeComentario'}),
                   dict(name='div', attrs={'id':'FormularioComentario'}),
                   dict(name='div', attrs={'id':'FormularioNormas'})]

    # Fetch the print-edition front page (format=1 has higher resolution).
    def get_cover_url(self):
        """Return the URL of the high-resolution front-page image, or None."""
        index = 'http://pdf.elperiodicodearagon.com/'
        soup = self.index_to_soup(index)
        for image in soup.findAll('img', src=True):
            if image['src'].startswith('http://pdf.elperiodicodearagon.com/funciones/portada-preview.php?eid='):
                # BUGFIX: the original used rstrip('format=2'), which strips
                # any trailing run of the characters 'format=2' rather than
                # that literal suffix; swap the format parameter explicitly.
                return image['src'].replace('format=2', 'format=1')
        return None

    # Lines 1 and 2: drop empty paragraphs between the article and comments.
    # Line 3: the index did not point correctly at the start of the article.
    preprocess_regexps = [
        (re.compile(r'<p>&nbsp;</p>', re.DOTALL|re.IGNORECASE), lambda match: ''),
        (re.compile(r'<p> </p>', re.DOTALL|re.IGNORECASE), lambda match: ''),
        (re.compile(r'<p id="">', re.DOTALL|re.IGNORECASE), lambda match: '<p>')
    ]

View File

@ -1,86 +1,95 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.elpais.com/diario/
www.elpais.com
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class ElPaisImpresa(BasicNewsRecipe):
title = u'El Pa\xeds - edicion impresa'
class ElPais_RSS(BasicNewsRecipe):
title = 'El Pais'
__author__ = 'Darko Miletic'
description = u'el periodico global en Espa\xf1ol'
description = 'el periodico global en Castellano'
publisher = 'EDICIONES EL PAIS, S.L.'
category = 'news, politics,Spain,actualidad,noticias,informacion,videos,fotografias,audios,graficos,nacional,internacional,deportes,economia,tecnologia,cultura,gente,television,sociedad,opinion,blogs,foros,chats,encuestas,entrevistas,participacion'
category = 'news, politics, finances, world, spain'
oldest_article = 2
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'latin1'
encoding = 'cp1252'
use_embedded_content = False
language = 'es'
language = 'es_ES'
remove_empty_feeds = True
publication_type = 'newspaper'
masthead_url = 'http://www.elpais.com/im/tit_logo_global.gif'
index = 'http://www.elpais.com/diario/'
extra_css = ' p{text-align: justify} body{ text-align: left; font-family: Georgia,"Times New Roman",Times,serif } h2{font-family: Arial,Helvetica,sans-serif} img{margin-bottom: 0.4em} '
masthead_url = 'http://www.elpais.com/im/tit_logo.gif'
extra_css = """
body{font-family: Georgia,"Times New Roman",Times,serif }
h3{font-family: Arial,Helvetica,sans-serif}
img{margin-bottom: 0.4em; display:block}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
feeds = [
(u'Internacional' , index + u'internacional/' )
,(u'Espa\xf1a' , index + u'espana/' )
,(u'Economia' , index + u'economia/' )
,(u'Opinion' , index + u'opinion/' )
,(u'Vi\xf1etas' , index + u'vineta/' )
,(u'Sociedad' , index + u'sociedad/' )
,(u'Cultura' , index + u'cultura/' )
,(u'Tendencias' , index + u'tendencias/' )
,(u'Gente' , index + u'gente/' )
,(u'Obituarios' , index + u'obituarios/' )
,(u'Deportes' , index + u'deportes/' )
,(u'Pantallas' , index + u'radioytv/' )
,(u'Ultima' , index + u'ultima/' )
,(u'Educacion' , index + u'educacion/' )
,(u'Saludo' , index + u'salud/' )
,(u'Ciberpais' , index + u'ciberpais/' )
,(u'EP3' , index + u'ep3/' )
,(u'Cine' , index + u'cine/' )
,(u'Babelia' , index + u'babelia/' )
,(u'El viajero' , index + u'viajero/' )
,(u'Negocios' , index + u'negocios/' )
,(u'Domingo' , index + u'domingo/' )
,(u'El Pais semanal' , index + u'eps/' )
,(u'Quadern Catalunya' , index + u'quadern-catalunya/' )
]
keep_only_tags = [dict(attrs={'class':['cabecera_noticia estirar','cabecera_noticia','','contenido_noticia']})]
remove_tags = [
dict(name=['meta','link','base','iframe','embed','object'])
,dict(attrs={'class':['info_complementa','estructura_2col_der','votos estirar','votos']})
,dict(attrs={'id':'utilidades'})
]
remove_tags_after = dict(attrs={'id':'utilidades'})
remove_attributes = ['lang','border','width','height']
keep_only_tags=[dict(attrs={'class':['cabecera_noticia','contenido_noticia']})]
remove_attributes=['width','height']
remove_tags=[dict(name='link')]
def parse_index(self):
    # Build the article list by scraping each section page listed in
    # self.feeds (here each "feed URL" is an HTML section page, not RSS).
    totalfeeds = []
    lfeeds = self.get_feeds()
    for feedobj in lfeeds:
        feedtitle, feedurl = feedobj
        self.report_progress(0, _('Fetching feed')+' %s...'%(feedtitle if feedtitle else feedurl))
        articles = []
        soup = self.index_to_soup(feedurl)
        # Headline anchors use these layout classes on the section pages.
        for item in soup.findAll('a',attrs={'class':['g19r003','g19i003','g17r003','g17i003']}):
            # Strip the trailing path segment to get the canonical article URL.
            url = 'http://www.elpais.com' + item['href'].rpartition('/')[0]
            title = self.tag_to_string(item)
            date = strftime(self.timefmt)
            articles.append({
                'title' :title
                ,'date' :date
                ,'url' :url
                ,'description':''
            })
        totalfeeds.append((feedtitle, articles))
    return totalfeeds
feeds = [
(u'Lo ultimo' , u'http://www.elpais.com/rss/feed.html?feedId=17046')
,(u'America Latina' , u'http://www.elpais.com/rss/feed.html?feedId=17041')
,(u'Mexico' , u'http://www.elpais.com/rss/feed.html?feedId=17042')
,(u'Europa' , u'http://www.elpais.com/rss/feed.html?feedId=17043')
,(u'Estados Unidos' , u'http://www.elpais.com/rss/feed.html?feedId=17044')
,(u'Oriente proximo' , u'http://www.elpais.com/rss/feed.html?feedId=17045')
,(u'Espana' , u'http://www.elpais.com/rss/feed.html?feedId=1002' )
,(u'Andalucia' , u'http://www.elpais.com/rss/feed.html?feedId=17057')
,(u'Catalunia' , u'http://www.elpais.com/rss/feed.html?feedId=17059')
,(u'Comunidad Valenciana' , u'http://www.elpais.com/rss/feed.html?feedId=17061')
,(u'Madrid' , u'http://www.elpais.com/rss/feed.html?feedId=1016' )
,(u'Pais Vasco' , u'http://www.elpais.com/rss/feed.html?feedId=17062')
,(u'Galicia' , u'http://www.elpais.com/rss/feed.html?feedId=17063')
,(u'Opinion' , u'http://www.elpais.com/rss/feed.html?feedId=1003' )
,(u'Sociedad' , u'http://www.elpais.com/rss/feed.html?feedId=1004' )
,(u'Deportes' , u'http://www.elpais.com/rss/feed.html?feedId=1007' )
,(u'Cultura' , u'http://www.elpais.com/rss/feed.html?feedId=1008' )
,(u'Cine' , u'http://www.elpais.com/rss/feed.html?feedId=17052')
,(u'Literatura' , u'http://www.elpais.com/rss/feed.html?feedId=17053')
,(u'Musica' , u'http://www.elpais.com/rss/feed.html?feedId=17051')
,(u'Arte' , u'http://www.elpais.com/rss/feed.html?feedId=17060')
,(u'Tecnologia' , u'http://www.elpais.com/rss/feed.html?feedId=1005' )
,(u'Economia' , u'http://www.elpais.com/rss/feed.html?feedId=1006' )
,(u'Ciencia' , u'http://www.elpais.com/rss/feed.html?feedId=17068')
,(u'Salud' , u'http://www.elpais.com/rss/feed.html?feedId=17074')
,(u'Ocio' , u'http://www.elpais.com/rss/feed.html?feedId=17075')
,(u'Justicia y Leyes' , u'http://www.elpais.com/rss/feed.html?feedId=17069')
,(u'Guerras y conflictos' , u'http://www.elpais.com/rss/feed.html?feedId=17070')
,(u'Politica' , u'http://www.elpais.com/rss/feed.html?feedId=17073')
]
def print_version(self, url):
    """Request El Pais's printer-friendly rendering of the article."""
    return '%s?print=1' % url
def preprocess_html(self, soup):
    """Drop inline styles, flatten anchors, and guarantee img alt text."""
    for styled in soup.findAll(style=True):
        del styled['style']
    for anchor in soup.findAll('a'):
        if anchor.string is not None:
            # Text-only anchor: replace the tag with its bare text.
            anchor.replaceWith(anchor.string)
        else:
            # Anchor wrapping markup: neuter it into a plain span.
            anchor.name = 'span'
            for attr in ['href', 'target', 'alt', 'title']:
                if anchor.has_key(attr):
                    del anchor[attr]
    for image in soup.findAll('img', alt=False):
        image['alt'] = 'image'
    return soup

View File

@ -40,13 +40,12 @@ class GazetvanAntwerpen(BasicNewsRecipe):
remove_tags_after = dict(name='span', attrs={'class':'author'})
feeds = [
(u'Overzicht & Blikvanger', u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/overview/overzicht' )
(u'Binnenland' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/binnenland' )
,(u'Buitenland' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/buitenland' )
,(u'Stad & Regio' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/stadenregio' )
,(u'Economie' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/economie' )
,(u'Binnenland' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/binnenland' )
,(u'Buitenland' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/buitenland' )
,(u'Media & Cultur' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/mediaencultuur')
,(u'Wetenschap' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/mediaencultuur')
,(u'Wetenschap' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/wetenschap' )
,(u'Sport' , u'http://www.gva.be/syndicationservices/artfeedservice.svc/rss/mostrecent/sport' )
]

View File

@ -1,50 +1,65 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__author__ = 'Lorenzo Vigentini'
__copyright__ = '2009, Lorenzo Vigentini <l.vigentini at gmail.com>'
__license__ = 'GPL v3'
__copyright__ = '04 December 2010, desUBIKado'
__author__ = 'desUBIKado'
__description__ = 'Daily newspaper from Aragon'
__version__ = 'v1.01'
__date__ = '30, January 2010'
__version__ = 'v0.03'
__date__ = '11, December 2010'
'''
http://www.heraldo.es/
[url]http://www.heraldo.es/[/url]
'''
import time
from calibre.web.feeds.news import BasicNewsRecipe
class heraldo(BasicNewsRecipe):
author = 'Lorenzo Vigentini'
__author__ = 'desUBIKado'
description = 'Daily newspaper from Aragon'
cover_url = 'http://www.heraldo.es/MODULOS/global/publico/interfaces/img/logo.gif'
title = u'Heraldo de Aragon'
publisher = 'OJD Nielsen'
category = 'News, politics, culture, economy, general interest'
language = 'es'
timefmt = '[%a, %d %b, %Y]'
oldest_article = 1
max_articles_per_feed = 25
max_articles_per_feed = 100
use_embedded_content = False
recursion = 10
remove_javascript = True
no_stylesheets = True
keep_only_tags = [
dict(name='div', attrs={'class':['titularNoticiaNN','textoGrisVerdanaContenidos']})
]
recursion = 10
feeds = [
(u'Portadas ', u'http://www.heraldo.es/index.php/mod.portadas/mem.rss')
]
(u'Portadas', u'http://www.heraldo.es/index.php/mod.portadas/mem.rss')
]
keep_only_tags = [dict(name='div', attrs={'id':['dts','com']})]
remove_tags = [dict(name='a', attrs={'class':['com flo-r','enl-if','enl-df']}),
dict(name='div', attrs={'class':['brb-b-s con marg-btt','cnt-rel con']}),
dict(name='form', attrs={'class':'form'})]
remove_tags_before = dict(name='div' , attrs={'id':'dts'})
remove_tags_after = dict(name='div' , attrs={'id':'com'})
def get_cover_url(self):
    """Return today's front-page PDF URL, falling back to the site logo.

    The PDF URL is probed with a real request; if it is not available
    (e.g. not published yet) the masthead logo is used instead.
    """
    st = time.localtime()
    year = str(st.tm_year)
    month = "%.2d" % st.tm_mon
    day = "%.2d" % st.tm_mday
    # e.g. http://oldorigin-www.heraldo.es/20101211/primeras/portada_aragon.pdf
    cover = 'http://oldorigin-www.heraldo.es/' + year + month + day + '/primeras/portada_aragon.pdf'
    br = BasicNewsRecipe.get_browser()
    try:
        br.open(cover)
    except Exception:
        # Narrowed from a bare except: so Ctrl-C/SystemExit still propagate.
        self.log("\nPortada no disponible")
        cover = 'http://www.heraldo.es/MODULOS/global/publico/interfaces/img/logo-Heraldo.png'
    return cover
extra_css = '''
.articledate {color: gray;font-family: monospace;}
.articledescription {display: block;font-family: sans;font-size: 0.7em; text-indent: 0;}
.firma {color: #666;display: block;font-family: verdana, arial, helvetica;font-size: 1em;margin-bottom: 8px;}
.textoGrisVerdanaContenidos {color: #56595c;display: block;font-family: Verdana;font-size: 1.28571em;padding-bottom: 10px}
.titularNoticiaNN {display: block;padding-bottom: 10px;padding-left: 0;padding-right: 0;padding-top: 4px}
.titulo {color: #003066;font-family: Tahoma;font-size: 1.92857em;font-weight: bold;line-height: 1.2em}
'''
h2{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:xx-large;}
'''

View File

@ -1,88 +1,72 @@
# -*- coding: utf-8 -*-
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
class JournalofHospitalMedicine(BasicNewsRecipe):
title = 'Journal of Hospital Medicine'
__author__ = 'Krittika Goyal'
__author__ = 'Kovid Goyal'
description = 'Medical news'
timefmt = ' [%d %b, %Y]'
needs_subscription = True
language = 'en'
no_stylesheets = True
#remove_tags_before = dict(name='div', attrs={'align':'center'})
#remove_tags_after = dict(name='ol', attrs={'compact':'COMPACT'})
remove_tags = [
dict(name='iframe'),
dict(name='div', attrs={'class':'subContent'}),
dict(name='div', attrs={'id':['contentFrame']}),
#dict(name='form', attrs={'onsubmit':"return verifySearch(this.w,'Keyword, citation, or author')"}),
#dict(name='table', attrs={'align':'RIGHT'}),
]
keep_only_tags = [dict(id=['articleTitle', 'articleMeta', 'fulltext'])]
remove_tags = [dict(attrs={'class':'licensedContent'})]
# TO LOGIN
def get_browser(self):
br = BasicNewsRecipe.get_browser()
br.open('http://www3.interscience.wiley.com/cgi-bin/home')
br.select_form(name='siteLogin')
br['LoginName'] = self.username
br['Password'] = self.password
br.select_form(nr=0)
br['j_username'] = self.username
br['j_password'] = self.password
response = br.submit()
raw = response.read()
if 'userName = ""' in raw:
if '<h2>LOGGED IN</h2>' not in raw:
raise Exception('Login failed. Check your username and password')
return br
#TO GET ARTICLE TOC
def johm_get_index(self):
return self.index_to_soup('http://www3.interscience.wiley.com/journal/111081937/home')
return self.index_to_soup('http://onlinelibrary.wiley.com/journal/10.1002/(ISSN)1553-5606/currentissue')
# To parse artice toc
def parse_index(self):
parse_soup = self.johm_get_index()
soup = self.johm_get_index()
toc = soup.find(id='issueTocGroups')
feeds = []
for group in toc.findAll('li', id=re.compile(r'group\d+')):
gtitle = group.find(attrs={'class':'subSectionHeading'})
if gtitle is None:
continue
gtitle = self.tag_to_string(gtitle)
arts = group.find(attrs={'class':'articles'})
if arts is None:
continue
self.log('Found section:', gtitle)
articles = []
for art in arts.findAll(attrs={'class':lambda x: x and 'tocArticle'
in x}):
a = art.find('a', href=True)
if a is None:
continue
url = a.get('href')
if url.startswith('/'):
url = 'http://onlinelibrary.wiley.com' + url
url = url.replace('/abstract', '/full')
title = self.tag_to_string(a)
a.extract()
pm = art.find(attrs={'class':'productMenu'})
if pm is not None:
pm.extract()
desc = self.tag_to_string(art)
self.log('\tFound article:', title, 'at', url)
articles.append({'title':title, 'url':url, 'description':desc,
'date':''})
if articles:
feeds.append((gtitle, articles))
div = parse_soup.find(id='contentCell')
current_section = None
current_articles = []
feeds = []
for x in div.findAll(True):
if x.name == 'h4':
# Section heading found
if current_articles and current_section:
feeds.append((current_section, current_articles))
current_section = self.tag_to_string(x)
current_articles = []
self.log('\tFound section:', current_section)
if current_section is not None and x.name == 'strong':
title = self.tag_to_string(x)
p = x.parent.parent.find('a', href=lambda x: x and '/HTMLSTART' in x)
if p is None:
continue
url = p.get('href', False)
if not url or not title:
continue
if url.startswith('/'):
url = 'http://www3.interscience.wiley.com'+url
url = url.replace('/HTMLSTART', '/main.html,ftx_abs')
self.log('\t\tFound article:', title)
self.log('\t\t\t', url)
#if url.startswith('/'):
#url = 'http://online.wsj.com'+url
current_articles.append({'title': title, 'url':url,
'description':'', 'date':''})
if current_articles and current_section:
feeds.append((current_section, current_articles))
return feeds
def preprocess_html(self, soup):
    """Swap thumbnail figure images ('tfig') for full-size ones ('nfig')."""
    for image in soup.findAll('img', src=True):
        image['src'] = image['src'].replace('tfig', 'nfig')
    return soup
return feeds

View File

@ -0,0 +1,31 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
'''
www.kahoku.co.jp
'''
from calibre.web.feeds.news import BasicNewsRecipe
class KahokuShinpoNews(BasicNewsRecipe):
    # Kahoku Shinpo -- Tohoku regional newspaper (Japan); the site and
    # its feed are Shift_JIS encoded.
    title = u'\u6cb3\u5317\u65b0\u5831'
    __author__ = 'Hiroshi Miura'
    oldest_article = 2
    max_articles_per_feed = 20
    description = 'Tohoku regional news paper in Japan'
    publisher = 'Kahoku Shinpo Sha'
    category = 'news, japan'
    language = 'ja'
    encoding = 'Shift_JIS'
    no_stylesheets = True

    feeds = [(u'news', u'http://www.kahoku.co.jp/rss/index_thk.xml')]

    # Keep the headline, body, and any left-floated photo blocks.
    keep_only_tags = [ dict(id="page_title"),
                       dict(id="news_detail"),
                       dict(id="bt_title"),
                       {'class':"photoLeft"},
                       dict(id="bt_body")
                     ]
    remove_tags = [ {'class':"button"}]

View File

@ -0,0 +1,36 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2010, Vadim Dyadkin, dyadkin@gmail.com'
__author__ = 'Vadim Dyadkin'
from calibre.web.feeds.news import BasicNewsRecipe
class Computerra(BasicNewsRecipe):
    # Recipe for "Computerra" (Russian IT/popular-science magazine),
    # fetched via its Feedburner feed.
    title = u'\u041a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440\u0440\u0430'
    recursion = 50
    oldest_article = 100
    __author__ = 'Vadim Dyadkin'
    max_articles_per_feed = 100
    use_embedded_content = False
    simultaneous_downloads = 5
    language = 'ru'
    description = u'\u041a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440\u044b, \u043e\u043a\u043e\u043b\u043e\u043d\u0430\u0443\u0447\u043d\u044b\u0435 \u0438 \u043e\u043a\u043e\u043b\u043e\u0444\u0438\u043b\u043e\u0441\u043e\u0444\u0441\u043a\u0438\u0435 \u0441\u0442\u0430\u0442\u044c\u0438, \u0433\u0430\u0434\u0436\u0435\u0442\u044b.'

    keep_only_tags = [dict(name='div', attrs={'id': 'content'}),]

    feeds = [(u'\u041a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440\u0440\u0430', 'http://feeds.feedburner.com/ct_news/'),]

    # Drop comment widgets, related-post lists, share buttons, etc.
    remove_tags = [dict(name='div', attrs={'id': ['fin', 'idc-container', 'idc-noscript',]}),
                   dict(name='ul', attrs={'class': "related_post"}),
                   dict(name='p', attrs={'class': 'info'}),
                   dict(name='a', attrs={'rel': 'tag', 'class': 'twitter-share-button', 'type': 'button_count'}),
                   dict(name='h2', attrs={}),]

    extra_css = 'body { text-align: justify; }'

    def get_article_url(self, article):
        # Prefer the original (pre-Feedburner) link; fall back to the GUID.
        return article.get('feedburner:origLink', article.get('guid'))

View File

@ -78,4 +78,6 @@ class Lanacion(BasicNewsRecipe):
]
def preprocess_html(self, soup):
    """Strip inline styles, then run the shared image-fixup helper."""
    for tagged in soup.findAll(style=True):
        del tagged['style']
    return self.adeify_images(soup)

View File

@ -4,7 +4,7 @@ from calibre.web.feeds.recipes import BasicNewsRecipe
class LeMonde(BasicNewsRecipe):
title = 'Le Monde'
__author__ = 'veezh'
description = 'Actualités'
description = u'Actualit\xe9s'
oldest_article = 1
max_articles_per_feed = 100
no_stylesheets = True

View File

@ -0,0 +1,38 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
'''
nationalgeographic.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class NationalGeographicNews(BasicNewsRecipe):
    """Recipe for the main National Geographic news feed."""

    title = u'National Geographic News'
    oldest_article = 7
    max_articles_per_feed = 100
    remove_javascript = True
    no_stylesheets = True
    use_embedded_content = False

    feeds = [(u'news', u'http://feeds.nationalgeographic.com/ng/News/News_Main')]

    remove_tags_before = dict(id='page_head')
    remove_tags_after = [dict(id='social_buttons'), {'class': 'aside'}]
    remove_tags = [
        {'class': 'hidden'}
    ]

    def parse_feeds(self):
        """Fetch the feeds, then drop ad entries served via ads.pheedo.com."""
        feeds = BasicNewsRecipe.parse_feeds(self)
        for feed in feeds:
            ads = [art for art in feed.articles
                   if re.search(r'ads\.pheedo\.com', art.url)]
            for ad in ads:
                feed.articles.remove(ad)
        return feeds

View File

@ -0,0 +1,20 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
'''
nationalgeographic.co.jp
'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class NationalGeoJp(BasicNewsRecipe):
    """Recipe for National Geographic News, Japanese edition."""

    title = u'\u30ca\u30b7\u30e7\u30ca\u30eb\u30fb\u30b8\u30aa\u30b0\u30e9\u30d5\u30a3\u30c3\u30af\u30cb\u30e5\u30fc\u30b9'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True

    feeds = [(u'news', u'http://www.nationalgeographic.co.jp/news/rss.php')]

    def print_version(self, url):
        """Point the article URL at its printer-friendly endpoint.

        Uses a literal string replace instead of the original
        re.sub(r'news_article.php', ...): the unescaped dots in that
        pattern were wildcards and could match unintended URLs.
        """
        return url.replace('news_article.php', 'news_printer_friendly.php')

View File

@ -4,23 +4,14 @@ from calibre.web.feeds.recipes import BasicNewsRecipe
class NYTimes(BasicNewsRecipe):
title = 'New England Journal of Medicine'
__author__ = 'Krittika Goyal'
__author__ = 'Kovid Goyal'
description = 'Medical news'
timefmt = ' [%d %b, %Y]'
needs_subscription = True
language = 'en'
no_stylesheets = True
remove_tags_before = dict(name='div', attrs={'align':'center'})
remove_tags_after = dict(name='ol', attrs={'compact':'COMPACT'})
remove_tags = [
dict(name='iframe'),
#dict(name='div', attrs={'class':'related-articles'}),
dict(name='div', attrs={'id':['sidebar']}),
#dict(name='form', attrs={'onsubmit':"return verifySearch(this.w,'Keyword, citation, or author')"}),
dict(name='table', attrs={'align':'RIGHT'}),
]
keep_only_tags = dict(id='content')
#TO LOGIN
@ -38,61 +29,50 @@ class NYTimes(BasicNewsRecipe):
#TO GET ARTICLE TOC
def nejm_get_index(self):
return self.index_to_soup('http://content.nejm.org/current.dtl')
return self.index_to_soup('http://content.nejm.org/current.dtl')
# To parse artice toc
def parse_index(self):
parse_soup = self.nejm_get_index()
parse_soup = self.nejm_get_index()
div = parse_soup.find(id='centerTOC')
feeds = []
current_section = None
current_articles = []
feeds = []
for x in div.findAll(True):
if x.name == 'img' and '/toc/' in x.get('src', '') and 'uarrow.gif' not in x.get('src', ''):
# Section heading found
if current_articles and current_section and 'Week in the' not in current_section:
feeds.append((current_section, current_articles))
current_section = x.get('alt')
current_articles = []
self.log('\tFound section:', current_section)
if current_section is not None and x.name == 'strong':
title = self.tag_to_string(x)
a = x.parent.find('a', href=lambda x: x and '/full/' in x)
if a is None:
continue
url = a.get('href', False)
if not url or not title:
continue
if url.startswith('/'):
url = 'http://content.nejm.org'+url
self.log('\t\tFound article:', title)
self.log('\t\t\t', url)
if url.startswith('/'):
url = 'http://online.wsj.com'+url
current_articles.append({'title': title, 'url':url,
'description':'', 'date':''})
if current_articles and current_section:
feeds.append((current_section, current_articles))
return feeds
def preprocess_html(self, soup):
for a in soup.findAll(text=lambda x: x and '[in this window]' in x):
a = a.findParent('a')
url = a.get('href', None)
if not url:
div = parse_soup.find(attrs={'class':'tocContent'})
for group in div.findAll(attrs={'class':'articleGrouping'}):
feed_title = group.find(attrs={'class':'articleType'})
if feed_title is None:
continue
if url.startswith('/'):
url = 'http://content.nejm.org'+url
isoup = self.index_to_soup(url)
img = isoup.find('img', src=lambda x: x and
x.startswith('/content/'))
if img is not None:
img.extract()
table = a.findParent('table')
table.replaceWith(img)
return soup
feed_title = self.tag_to_string(feed_title)
articles = []
self.log('Found section:', feed_title)
for art in group.findAll(attrs={'class':lambda x: x and 'articleEntry'
in x}):
link = art.find(attrs={'class':lambda x:x and 'articleLink' in
x})
if link is None:
continue
a = link.find('a', href=True)
if a is None:
continue
url = a.get('href')
if url.startswith('/'):
url = 'http://www.nejm.org'+url
title = self.tag_to_string(a)
self.log.info('\tFound article:', title, 'at', url)
article = {'title':title, 'url':url, 'date':''}
au = art.find(attrs={'class':'articleAuthors'})
if au is not None:
article['author'] = self.tag_to_string(au)
desc = art.find(attrs={'class':'hover_text'})
if desc is not None:
desc = self.tag_to_string(desc)
if 'author' in article:
desc = ' by ' + article['author'] + ' ' +desc
article['description'] = desc
articles.append(article)
if articles:
feeds.append((feed_title, articles))
return feeds

View File

@ -5,6 +5,7 @@ newscientist.com
'''
import re
import urllib
from calibre.web.feeds.news import BasicNewsRecipe
class NewScientist(BasicNewsRecipe):
@ -24,7 +25,7 @@ class NewScientist(BasicNewsRecipe):
needs_subscription = 'optional'
extra_css = """
body{font-family: Arial,sans-serif}
img{margin-bottom: 0.8em}
img{margin-bottom: 0.8em; display: block}
.quotebx{font-size: x-large; font-weight: bold; margin-right: 2em; margin-left: 2em}
"""
@ -41,12 +42,14 @@ class NewScientist(BasicNewsRecipe):
def get_browser(self):
br = BasicNewsRecipe.get_browser()
br.open('http://www.newscientist.com/')
if self.username is not None and self.password is not None:
br.open('https://www.newscientist.com/user/login?redirectURL=')
br.select_form(nr=2)
br['loginId' ] = self.username
br['password'] = self.password
br.submit()
if self.username is not None and self.password is not None:
br.open('https://www.newscientist.com/user/login')
data = urllib.urlencode({ 'source':'form'
,'redirectURL':''
,'loginId':self.username
,'password':self.password
})
br.open('https://www.newscientist.com/user/login',data)
return br
remove_tags = [
@ -55,21 +58,22 @@ class NewScientist(BasicNewsRecipe):
,dict(name='p' , attrs={'class':['marker','infotext' ]})
,dict(name='meta' , attrs={'name' :'description' })
,dict(name='a' , attrs={'rel' :'tag' })
,dict(name='ul' , attrs={'class':'markerlist' })
,dict(name=['link','base','meta','iframe','object','embed'])
]
remove_tags_after = dict(attrs={'class':['nbpcopy','comments']})
remove_attributes = ['height','width','lang']
remove_attributes = ['height','width','lang','onclick']
feeds = [
(u'Latest Headlines' , u'http://feeds.newscientist.com/science-news' )
,(u'Magazine' , u'http://www.newscientist.com/feed/magazine' )
,(u'Health' , u'http://www.newscientist.com/feed/view?id=2&type=channel' )
,(u'Life' , u'http://www.newscientist.com/feed/view?id=3&type=channel' )
,(u'Space' , u'http://www.newscientist.com/feed/view?id=6&type=channel' )
,(u'Physics and Mathematics' , u'http://www.newscientist.com/feed/view?id=4&type=channel' )
,(u'Environment' , u'http://www.newscientist.com/feed/view?id=1&type=channel' )
,(u'Science in Society' , u'http://www.newscientist.com/feed/view?id=5&type=channel' )
,(u'Tech' , u'http://www.newscientist.com/feed/view?id=7&type=channel' )
(u'Latest Headlines' , u'http://feeds.newscientist.com/science-news' )
,(u'Magazine' , u'http://feeds.newscientist.com/magazine' )
,(u'Health' , u'http://feeds.newscientist.com/health' )
,(u'Life' , u'http://feeds.newscientist.com/life' )
,(u'Space' , u'http://feeds.newscientist.com/space' )
,(u'Physics and Mathematics' , u'http://feeds.newscientist.com/physics-math' )
,(u'Environment' , u'http://feeds.newscientist.com/environment' )
,(u'Science in Society' , u'http://feeds.newscientist.com/science-in-society' )
,(u'Tech' , u'http://feeds.newscientist.com/tech' )
]
def get_article_url(self, article):
@ -79,11 +83,21 @@ class NewScientist(BasicNewsRecipe):
return url + '?full=true&print=true'
def preprocess_html(self, soup):
if soup.html.has_key('id'):
del soup.html['id']
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll(['quote','quotetext']):
item.name='p'
for item in soup.findAll(['xref','figref']):
tstr = item.string
item.replaceWith(tstr)
for tg in soup.findAll('a'):
if tg.string == 'Home':
tg.parent.extract()
return self.adeify_images(soup)
return self.adeify_images(soup)
else:
if tg.string is not None:
tstr = tg.string
tg.replaceWith(tstr)
return soup

View File

@ -10,8 +10,8 @@ import mechanize
from calibre.ptempfile import PersistentTemporaryFile
class NikkeiNet_sub_life(BasicNewsRecipe):
title = u'\u65e5\u7d4c\u65b0\u805e\u96fb\u5b50\u7248(\u751f\u6d3b)'
class NikkeiNet_sub_shakai(BasicNewsRecipe):
title = u'\u65e5\u7d4c\u65b0\u805e\u96fb\u5b50\u7248(Social)'
__author__ = 'Hiroshi Miura'
description = 'News and current market affairs from Japan'
cover_url = 'http://parts.nikkei.com/parts/ds/images/common/logo_r1.svg'

View File

@ -0,0 +1,58 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Based on Lars Jacob's Taz Digiabo recipe
__license__ = 'GPL v3'
__copyright__ = '2010, veezh'
'''
www.nrc.nl
'''
import os, urllib2, zipfile
import time
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ptempfile import PersistentTemporaryFile
class NRCHandelsblad(BasicNewsRecipe):
    '''Download the official ready-made EPUB edition of NRC Handelsblad.

    Instead of scraping articles, this recipe fetches the publisher's own
    EPUB for today, extracts it into the output directory and hands its
    OPF to calibre.
    '''

    title       = u'NRC Handelsblad'
    description = u'De EPUB-versie van NRC'
    language    = 'nl'
    lang        = 'nl-NL'

    __author__ = 'veezh'

    conversion_options = {
        # The downloaded EPUB already contains a cover.
        'no_default_epub_cover' : True
    }

    def build_index(self):
        '''Fetch today's EPUB, unpack it, and return the path to its OPF.

        :raises ValueError: when today's edition is not yet available
                            (the server answers with an HTTP error).
        '''
        today = time.strftime("%Y%m%d")
        domain = "http://digitaleeditie.nrc.nl"
        url = domain + "/digitaleeditie/helekrant/epub/nrc_" + today + ".epub"
        try:
            f = urllib2.urlopen(url)
        except urllib2.HTTPError:
            self.report_progress(0, _('Kan niet inloggen om editie te downloaden'))
            raise ValueError('Krant van vandaag nog niet beschikbaar')
        tmp = PersistentTemporaryFile(suffix='.epub')
        self.report_progress(0, _('downloading epub'))
        tmp.write(f.read())
        f.close()
        # Close exactly once (the original closed tmp twice) before reading
        # it back as a zip archive.
        tmp.close()
        zfile = zipfile.ZipFile(tmp.name, 'r')
        try:
            self.report_progress(0, _('extracting epub'))
            zfile.extractall(self.output_dir)
        finally:
            # Always release the archive handle, even if extraction fails.
            zfile.close()
        index = os.path.join(self.output_dir, 'content.opf')
        self.report_progress(1, _('epub downloaded and extracted'))
        return index

View File

@ -0,0 +1,58 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
'''
paperli
'''
from calibre.web.feeds.news import BasicNewsRecipe
from calibre import strftime
class paperli_topics(BasicNewsRecipe):
    # Customize this recipe and change paperli_tag and title below to
    # download news on your favorite tag
    paperli_tag = 'climate'
    title = u'The #climate Daily - paperli'
    #-------------------------------------------------------------
    __author__ = 'Hiroshi Miura'
    oldest_article = 7
    max_articles_per_feed = 100
    description = 'paper.li page about '+ paperli_tag
    publisher = 'paper.li'
    category = 'paper.li'
    language = 'en'
    encoding = 'utf-8'
    remove_javascript = True
    masthead_title = u'The '+ paperli_tag +' Daily'
    timefmt = '[%y/%m/%d]'
    # Landing page for the tag; topic sub-pages are discovered from its
    # bottom navigation bar.
    base_url = 'http://paper.li'
    index = base_url+'/tag/'+paperli_tag

    def parse_index(self):
        """Build the feed list: one feed per topic tab on the tag's page,
        each populated with the stories found on that topic's sub-page."""
        # get topics: every plain <li> in the bottom nav holds a topic link
        # whose href is a site-relative path.
        topics = []
        soup = self.index_to_soup(self.index)
        topics_lists = soup.find('div',attrs={'class':'paper-nav-bottom'})
        for item in topics_lists.findAll('li', attrs={'class':""}):
            itema = item.find('a',href=True)
            topics.append({'title': itema.string, 'url': itema['href']})

        #get feeds: visit each topic page and collect its story blocks.
        feeds = []
        for topic in topics:
            newsarticles = []
            soup = self.index_to_soup(''.join([self.base_url, topic['url'] ]))
            topstories = soup.findAll('div',attrs={'class':'yui-u'})
            for itt in topstories:
                itema = itt.find('a',href=True,attrs={'class':'ts'})
                if itema is not None:
                    # NOTE(review): itemd may be None when a story block has
                    # no text div; itemd.string would then raise
                    # AttributeError -- assumed not to occur, TODO confirm.
                    itemd = itt.find('div',text=True, attrs={'class':'text'})
                    newsarticles.append({
                        'title'      :itema.string
                        ,'date'       :strftime(self.timefmt)
                        ,'url'        :itema['href']
                        ,'description':itemd.string
                    })
            feeds.append((topic['title'], newsarticles))
        return feeds

View File

@ -13,14 +13,16 @@ class Radikal_tr(BasicNewsRecipe):
description = 'News from Turkey'
publisher = 'radikal'
category = 'news, politics, Turkey'
oldest_article = 2
oldest_article = 7
max_articles_per_feed = 150
no_stylesheets = True
encoding = 'cp1254'
use_embedded_content = False
masthead_url = 'http://www.radikal.com.tr/D/i/1/V2/radikal_logo.jpg'
language = 'tr'
extra_css = ' @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)} .article_description,body{font-family: Arial,Verdana,Helvetica,sans1,sans-serif } '
extra_css = """ @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
.article_description,body{font-family: Arial,Verdana,Helvetica,sans1,sans-serif}
"""
conversion_options = {
'comment' : description
@ -34,7 +36,13 @@ class Radikal_tr(BasicNewsRecipe):
remove_tags_after = dict(attrs={'id':'haberDetayYazi'})
feeds = [(u'Yazarlar', u'http://www.radikal.com.tr/d/rss/RssYazarlar.xml')]
feeds = [
(u'Yazarlar' , u'http://www.radikal.com.tr/d/rss/RssYazarlar.xml')
,(u'Turkiye' , u'http://www.radikal.com.tr/d/rss/Rss_97.xml' )
,(u'Politika' , u'http://www.radikal.com.tr/d/rss/Rss_98.xml' )
,(u'Dis Haberler', u'http://www.radikal.com.tr/d/rss/Rss_100.xml' )
,(u'Ekonomi' , u'http://www.radikal.com.tr/d/rss/Rss_101.xml' )
]
def print_version(self, url):
articleid = url.rpartition('ArticleID=')[2]

View File

@ -0,0 +1,47 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '11 December 2010, desUBIKado'
__author__ = 'desUBIKado'
__description__ = 'Entertainment guide from Aragon'
__version__ = 'v0.01'
__date__ = '11, December 2010'
'''
[url]http://www.redaragon.es/[/url]
'''
from calibre.web.feeds.news import BasicNewsRecipe
class heraldo(BasicNewsRecipe):
    # Entertainment/agenda listings for Aragon, scraped from redaragon.com.

    # Identity and presentation.
    title       = u'RedAragon'
    __author__  = 'desUBIKado'
    description = u'Guia de ocio desde Aragon'
    publisher   = 'Grupo Z'
    category    = 'Concerts, Movies, Entertainment news'
    language    = 'es'
    cover_url   = 'http://www.redaragon.com/2008_img/logotipo.gif'
    timefmt     = '[%a, %d %b, %Y]'

    # Fetch behaviour.
    oldest_article        = 15
    max_articles_per_feed = 100
    encoding              = 'iso-8859-1'
    use_embedded_content  = False
    remove_javascript     = True
    no_stylesheets        = True

    # One feed per event category on the agenda RSS service.
    feeds = [
        (u'Conciertos', u'http://redaragon.com/rss/agenda.asp?tid=1'),
        (u'Exposiciones', u'http://redaragon.com/rss/agenda.asp?tid=5'),
        (u'Teatro', u'http://redaragon.com/rss/agenda.asp?tid=10'),
        (u'Conferencias', u'http://redaragon.com/rss/agenda.asp?tid=2'),
        (u'Ferias', u'http://redaragon.com/rss/agenda.asp?tid=6'),
        (u'Filmotecas/Cineclubs', u'http://redaragon.com/rss/agenda.asp?tid=7'),
        (u'Presentaciones', u'http://redaragon.com/rss/agenda.asp?tid=9'),
        (u'Fiestas', u'http://redaragon.com/rss/agenda.asp?tid=11'),
        (u'Infantil', u'http://redaragon.com/rss/agenda.asp?tid=13'),
        (u'Otros', u'http://redaragon.com/rss/agenda.asp?tid=8'),
    ]

    # Keep only the event detail box; drop sharing widgets and side boxes.
    keep_only_tags   = [dict(name='div', attrs={'id':'FichaEventoAgenda'})]
    remove_tags      = [dict(name='div', attrs={'class':['Comparte','CajaAgenda','Caja','Cintillo']})]
    remove_tags_before = dict(name='div', attrs={'id':'FichaEventoAgenda'})
    remove_tags_after  = dict(name='div', attrs={'class':'Cintillo'})

View File

@ -25,22 +25,20 @@ class Salon_com(BasicNewsRecipe):
feeds = [
('News & Politics', 'http://feeds.salon.com/salon/news'),
('War Room', 'http://feeds.salon.com/salon/war_room'),
('Arts & Entertainment', 'http://feeds.salon.com/salon/ent'),
('I Like to Watch', 'http://feeds.salon.com/salon/iltw'),
('Beyond Multiplex', 'http://feeds.salon.com/salon/btm'),
('Book Reviews', 'http://feeds.salon.com/salon/books'),
('All Life', 'http://feeds.salon.com/salon/mwt'),
('All Opinion', 'http://feeds.salon.com/salon/opinion'),
('Glenn Greenwald', 'http://feeds.salon.com/salon/greenwald'),
('Garrison Keillor', 'http://dir.salon.com/topics/garrison_keillor/index.rss'),
('Joan Walsh', 'http://www.salon.com/rss/walsh.rss'),
('All Sports', 'http://feeds.salon.com/salon/sports'),
('War Room', 'http://feeds.feedburner.com/salon/war_room'),
('Joan Walsh', 'http://feeds.feedburner.com/Salon_Joan_Walsh'),
('Glenn Greenwald', 'http://feeds.feedburner.com/salon/greenwald'),
('Tech & Business', 'http://feeds.salon.com/salon/tech'),
('How World Works', 'http://feeds.salon.com/salon/htww')
('Ask the Pilot', 'http://feeds.feedburner.com/salon/ask_the_pilot'),
('How World Works', 'http://feeds.feedburner.com/salon/htww'),
('Life', 'http://feeds.feedburner.com/salon/mwt'),
('Broadsheet', 'http://feeds.feedburner.com/salon/broadsheet'),
('Movie Reviews', 'http://feeds.feedburner.com/salon/movie_reviews'),
('Film Salon', 'http://feeds.feedburner.com/Salon/Film_Salon'),
('TV', 'http://feeds.feedburner.com/salon/tv'),
('Books', 'http://feeds.feedburner.com/salon/books')
]
def print_version(self, url):
return url.replace('/index.html', '/print.html')

View File

@ -0,0 +1,42 @@
#!/usr/bin/env python
import re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag
class SBM(BasicNewsRecipe):
    '''Recipe for the Science Based Medicine blog (sciencebasedmedicine.org).'''

    title = 'Science Based Medicine'
    __author__ = 'BuzzKill'
    description = 'Exploring issues and controversies in the relationship between science and medicine'
    oldest_article = 5
    max_articles_per_feed = 15
    no_stylesheets = True
    use_embedded_content = False
    encoding = 'utf-8'
    publisher = 'SBM'
    category = 'science, sbm, ebm, blog, pseudoscience'
    language = 'en'
    lang = 'en-US'

    conversion_options = {
        'comment'        : description
        , 'tags'         : category
        , 'publisher'    : publisher
        , 'language'     : lang
        , 'pretty_print' : True
    }

    # Keep the author byline link and the article body; drop everything else.
    keep_only_tags = [
        dict(name='a', attrs={'title':re.compile(r'Posts by.*', re.DOTALL|re.IGNORECASE)}),
        dict(name='div', attrs={'class':'entry'})
    ]

    feeds = [(u'Science Based Medicine', u'http://www.sciencebasedmedicine.org/?feed=rss2')]

    def preprocess_html(self, soup):
        '''Insert a UTF-8 content-type meta tag, set the document language,
        then run calibre's standard Adobe image fixups.'''
        # Fixed: the attribute was misspelled 'context', which produced a
        # meta tag that did not actually declare the charset. A valid
        # http-equiv meta tag uses the 'content' attribute.
        mtag = Tag(soup,'meta',[('http-equiv','Content-Type'),('content','text/html; charset=utf-8')])
        soup.head.insert(0,mtag)
        soup.html['lang'] = self.lang
        return self.adeify_images(soup)

View File

@ -1,17 +1,19 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2010, JOlo'
'''
www.theweek.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
import re
class TheWeekFree(BasicNewsRecipe):
title = 'The Week Magazine - Free content'
__author__ = 'Darko Miletic'
class TheWeek(BasicNewsRecipe):
title = 'The Week Magazine'
__author__ = 'Jim Olo'
description = "The best of the US and international media. Daily coverage of commentary and analysis of the day's events, as well as arts, entertainment, people and gossip, and political cartoons."
publisher = 'The Week Publications, Inc.'
masthead_url = 'http://test.theweek.com/images/logo_theweek.gif'
cover_url = masthead_url
category = 'news, politics, USA'
oldest_article = 7
max_articles_per_feed = 100
@ -19,31 +21,27 @@ class TheWeekFree(BasicNewsRecipe):
encoding = 'utf-8'
use_embedded_content = False
language = 'en'
preprocess_regexps = [(re.compile(r'<h3><a href=.*</body>', re.DOTALL), lambda match: '</body>')]
remove_tags_before = dict(name='h1')
remove_tags_after = dict(name='div', attrs={'class':'articleSubscribe4free'})
remove_tags = [
dict(name='div', attrs={'class':['floatLeft','imageCaption','slideshowImageAttribution','postDate','utilities','cartoonInfo','left','middle','col300','articleSubscribe4free',' articleFlyout','articleFlyout floatRight','fourFreeBar']})
,dict(name='div', attrs={'id':['cartoonThumbs','rightColumn','header','partners']})
,dict(name='ul', attrs={'class':['slideshowNav','hotTopicsList topicList']})
]
remove_attributes = ['width','height', 'style', 'font', 'color']
extra_css = '''
h1{font-family:Geneva, Arial, Helvetica, sans-serif;color:#154B7A;}
h3{font-size: 14px;color:#999999; font-family:Geneva, Arial, Helvetica, sans-serif;font-weight: bold;}
h2{color:#666666; font-family:Geneva, Arial, Helvetica, sans-serif;font-size:small;}
p {font-family:Arial,Helvetica,sans-serif;}
'''
filter_regexps = [r'www\.palmcoastdata\.com']
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
keep_only_tags = [
dict(name=['h1','h2'])
, dict(name='div', attrs={'class':'basefont'})
, dict(name='div', attrs={'id':'slideshowLoader'})
]
remove_tags = [
dict(name='div', attrs={'id':['digg_dugg','articleRight','dateHeader']})
,dict(name=['object','embed','iframe'])
]
feeds = [
(u'News & Opinions' , u'http://www.theweek.com/section/index/news_opinion.rss')
,(u'Arts & Leisure' , u'http://www.theweek.com/section/index/arts_leisure.rss')
,(u'Business' , u'http://www.theweek.com/section/index/business.rss' )
,(u'Cartoon & Short takes' , u'http://www.theweek.com/section/index/cartoons_wit.rss')
]
feeds = [
(u'News-Opinion', u'http://theweek.com/section/index/news_opinion.rss'),
(u'Business', u'http://theweek.com/section/index/business.rss'),
(u'Arts-Life', u'http://theweek.com/section/index/arts_life.rss'),
(u'Cartoons', u'http://theweek.com/section/index/cartoon_wit/0/all-cartoons.rss')
]

View File

@ -4,6 +4,7 @@ __copyright__ = '2008-2010, Darko Miletic <darko.miletic at gmail.com>'
economictimes.indiatimes.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
class TheEconomicTimes(BasicNewsRecipe):
@ -32,18 +33,17 @@ class TheEconomicTimes(BasicNewsRecipe):
, 'language' : language
}
keep_only_tags = [dict(attrs={'class':'printdiv'})]
remove_tags = [dict(name=['object','link','embed','iframe','base','table','meta'])]
remove_attributes = ['name']
remove_tags_before = dict(name='h1')
feeds = [(u'All articles', u'http://economictimes.indiatimes.com/rssfeedsdefault.cms')]
def print_version(self, url):
rest, sep, art = url.rpartition('/articleshow/')
return 'http://m.economictimes.com/PDAET/articleshow/' + art
return 'http://economictimes.indiatimes.com/articleshow/' + art + '?prtpage=1'
def get_article_url(self, article):
rurl = article.get('link', None)
rurl = article.get('guid', None)
if (rurl.find('/quickieslist/') > 0) or (rurl.find('/quickiearticleshow/') > 0):
return None
return rurl

View File

@ -8,9 +8,10 @@ class TimesOfIndia(BasicNewsRecipe):
max_articles_per_feed = 25
no_stylesheets = True
keep_only_tags = [dict(attrs={'class':'prttabl'})]
keep_only_tags = [dict(attrs={'class':'maintable12'})]
remove_tags = [
dict(style=lambda x: x and 'float' in x)
dict(style=lambda x: x and 'float' in x),
dict(attrs={'class':'prvnxtbg'}),
]
feeds = [

View File

@ -0,0 +1,36 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Hiroshi Miura <miurahr@linux.com>'
'''
http://ameblo.jp/sauta19/
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class UniNoHimituKichiBlog(BasicNewsRecipe):
    '''Recipe for the "Uni secret base" cat blog on ameblo.jp, with the
    advertisement entries that rssad.jp injects into the feed filtered out.'''

    title = u'Uni secret base'
    __author__ = 'Hiroshi Miura'
    oldest_article = 2
    publication_type = 'blog'
    max_articles_per_feed = 20
    description = 'Japanese famous Cat blog'
    publisher = ''
    category = 'cat, pet, japan'
    language = 'ja'
    encoding = 'utf-8'

    feeds = [(u'blog', u'http://feedblog.ameba.jp/rss/ameblo/sauta19/rss20.xml')]

    def parse_feeds(self):
        '''Parse the RSS feed normally, then drop advertisement articles.

        :return: the list of feeds with rssad.jp entries removed.
        '''
        feeds = BasicNewsRecipe.parse_feeds(self)
        for curfeed in feeds:
            # Single in-place filter pass replaces the original collect-then-
            # delete loop, which re-searched the list with .index() for every
            # removal (accidentally quadratic). Slice assignment mutates the
            # existing list object, preserving any external references to it.
            curfeed.articles[:] = [
                article for article in curfeed.articles
                if not re.search(r'rssad.jp', article.url)
            ]
        return feeds

View File

@ -0,0 +1,62 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Derek Liang <Derek.liang.ca @@@at@@@ gmail.com>'
'''
wenxuecity.com
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class TheCND(BasicNewsRecipe):
    # Scrapes the "elite" posts of the znjy (children's education) board on
    # bbs.wenxuecity.com, groups them by posting date, and publishes only the
    # most recent day's posts as a single feed.
    title = 'wenxuecity - znjy'
    __author__ = 'Derek Liang'
    description = ''
    INDEX = 'http://bbs.wenxuecity.com/znjy/?elite=1'
    language = 'zh'
    conversion_options = {'linearize_tables':True}

    remove_tags_before = dict(name='div', id='message')
    remove_tags_after = dict(name='div', id='message')
    remove_tags = [dict(name='div', id='postmeta'), dict(name='div', id='footer')]
    no_stylesheets = True
    # Strip HTML comments before the page is parsed.
    preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]

    def print_version(self, url):
        # The board serves a printer-friendly version via the ?print query.
        return url + '?print'

    def parse_index(self):
        """Collect all elite posts, bucket them by date, and return one feed
        containing only the newest day's bucket (self.title is updated to
        include that date)."""
        soup = self.index_to_soup(self.INDEX)

        feeds = []
        articles = {}
        for a in soup.findAll('a', attrs={'class':'post'}):
            url = a['href']
            if url.startswith('/'):
                url = 'http://bbs.wenxuecity.com'+url
            title = self.tag_to_string(a)
            self.log('\tFound article: ', title, ' at:', url)
            # The post date appears as M/D/YY in the text around the link.
            # NOTE(review): raises AttributeError if a row carries no date --
            # assumed never to happen on this board, TODO confirm.
            dateReg = re.search( '(\d\d?)/(\d\d?)/(\d\d)', self.tag_to_string(a.parent) )
            # Re-encode as YY/MM/DD with zero padding so the date keys sort
            # chronologically as plain strings.
            date = '%(y)s/%(m)02d/%(d)02d' % {'y' : dateReg.group(3), 'm' : int(dateReg.group(1)), 'd' : int(dateReg.group(2)) }
            if not articles.has_key(date):
                articles[date] = []
            articles[date].append({'title':title, 'url':url, 'description': '', 'date':''})
            self.log('\t\tAppend to : ', date)

        self.log('log articles', articles)
        # Zero-padded keys sort lexicographically == chronologically; take the
        # newest day only.
        mostCurrent = sorted(articles).pop()
        self.title = '文学城 - 子女教育 - ' + mostCurrent

        feeds.append((self.title, articles[mostCurrent]))

        return feeds

    def populate_article_metadata(self, article, soup, first):
        # Only logs the article header for debugging; no metadata is changed.
        header = soup.find('h3')
        self.log('header: ' + self.tag_to_string(header))
        pass

View File

@ -38,12 +38,12 @@ class Wired(BasicNewsRecipe):
keep_only_tags = [dict(name='div', attrs={'class':'post'})]
remove_tags_after = dict(name='div', attrs={'class':'tweetmeme_button'})
remove_tags = [
dict(name=['object','embed','iframe','link'])
dict(name=['object','embed','iframe','link','meta','base'])
,dict(name='div', attrs={'class':['podcast_storyboard','tweetmeme_button']})
,dict(attrs={'id':'ff_bottom_nav'})
,dict(name='a',attrs={'href':'http://www.wired.com/app'})
]
remove_attributes = ['height','width']
remove_attributes = ['height','width','lang','border','clear']
def parse_index(self):
@ -78,7 +78,9 @@ class Wired(BasicNewsRecipe):
divurl = item.find('div',attrs={'class':'feature-header'})
if divurl:
divdesc = item.find('div',attrs={'class':'feature-text'})
url = 'http://www.wired.com' + divurl.a['href']
url = divurl.a['href']
if not divurl.a['href'].startswith('http://www.wired.com'):
url = 'http://www.wired.com' + divurl.a['href']
title = self.tag_to_string(divurl.a)
description = self.tag_to_string(divdesc)
date = strftime(self.timefmt)
@ -127,5 +129,17 @@ class Wired(BasicNewsRecipe):
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll('a'):
if item.string is not None:
tstr = item.string
item.replaceWith(tstr)
else:
item.name='span'
for atrs in ['href','target','alt','title','name','id']:
if item.has_key(atrs):
del item[atrs]
for item in soup.findAll('img'):
if not item.has_key('alt'):
item['alt'] = 'image'
return soup

View File

@ -46,7 +46,7 @@ class WallStreetJournal(BasicNewsRecipe):
br = BasicNewsRecipe.get_browser()
if self.username is not None and self.password is not None:
br.open('http://commerce.wsj.com/auth/login')
br.select_form(nr=0)
br.select_form(nr=1)
br['user'] = self.username
br['password'] = self.password
res = br.submit()

View File

@ -60,8 +60,8 @@ class ZeitDe(BasicNewsRecipe):
for tag in soup.findAll(name=['ul','li']):
tag.name = 'div'
soup.html['xml:lang'] = self.lang
soup.html['lang'] = self.lang
soup.html['xml:lang'] = self.language.replace('_', '-')
soup.html['lang'] = self.language.replace('_', '-')
mtag = '<meta http-equiv="Content-Type" content="text/html; charset=' + self.encoding + '">'
soup.head.insert(0,mtag)
return soup

View File

@ -318,7 +318,11 @@ class LinuxFreeze(Command):
import codecs
def set_default_encoding():
locale.setlocale(locale.LC_ALL, '')
try:
locale.setlocale(locale.LC_ALL, '')
except:
print 'WARNING: Failed to set default libc locale, using en_US.UTF-8'
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
enc = locale.getdefaultlocale()[1]
if not enc:
enc = locale.nl_langinfo(locale.CODESET)

View File

@ -36,6 +36,16 @@ Install BeautifulSoup 3.0.x manually into site-packages (3.1.x parses broken HTM
Install pywin32 and edit win32com\__init__.py setting _frozen = True and
__gen_path__ to a temp dir (otherwise it tries to set it to a dir in the install tree which leads to permission errors)
Note that you should use::
import tempfile
__gen_path__ = os.path.join(
tempfile.gettempdir(), "gen_py",
"%d.%d" % (sys.version_info[0], sys.version_info[1]))
Use gettempdir instead of the win32 api method as gettempdir returns a temp dir that is guaranteed to actually work.
Also edit win32com\client\gencache.py and change the except IOError on line 57 to catch all exceptions.
SQLite

View File

@ -2,7 +2,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = 'calibre'
__version__ = '0.7.33'
__version__ = '0.7.35'
__author__ = "Kovid Goyal <kovid@kovidgoyal.net>"
import re

View File

@ -80,6 +80,34 @@ class Plugin(object): # {{{
'''
pass
def load_resources(self, names):
    '''
    If this plugin comes in a ZIP file (user added plugin), this method
    will allow you to load resources from the ZIP file.

    For example to load an image::

        pixmap = QPixmap()
        pixmap.loadFromData(self.load_resources(['images/icon.png']).itervalues().next())
        icon = QIcon(pixmap)

    :param names: List of paths to resources in the zip file using / as separator
    :return: A dictionary of the form ``{name : file_contents}``. Any names
             that were not found in the zip file will not be present in the
             dictionary.
    :raises ValueError: if this plugin was not loaded from a ZIP file.
    '''
    if self.plugin_path is None:
        raise ValueError('This plugin was not loaded from a ZIP file')
    # Hoist membership testing to a set: the original tested each archive
    # entry against the list, which is O(len(namelist) * len(names)).
    wanted = frozenset(names)
    ans = {}
    with zipfile.ZipFile(self.plugin_path, 'r') as zf:
        for candidate in zf.namelist():
            if candidate in wanted:
                ans[candidate] = zf.read(candidate)
    return ans
def customization_help(self, gui=False):
'''
Return a string giving help on how to customize this plugin.

View File

@ -457,7 +457,8 @@ from calibre.devices.blackberry.driver import BLACKBERRY
from calibre.devices.cybook.driver import CYBOOK, ORIZON
from calibre.devices.eb600.driver import EB600, COOL_ER, SHINEBOOK, \
POCKETBOOK360, GER2, ITALICA, ECLICTO, DBOOK, INVESBOOK, \
BOOQ, ELONEX, POCKETBOOK301, MENTOR, POCKETBOOK602
BOOQ, ELONEX, POCKETBOOK301, MENTOR, POCKETBOOK602, \
POCKETBOOK701
from calibre.devices.iliad.driver import ILIAD
from calibre.devices.irexdr.driver import IREXDR1000, IREXDR800
from calibre.devices.jetbook.driver import JETBOOK, MIBUK, JETBOOK_MINI
@ -473,12 +474,14 @@ from calibre.devices.binatone.driver import README
from calibre.devices.hanvon.driver import N516, EB511, ALEX, AZBOOKA, THEBOOK
from calibre.devices.edge.driver import EDGE
from calibre.devices.teclast.driver import TECLAST_K3, NEWSMY, IPAPYRUS, \
SOVOS, PICO
SOVOS, PICO, SUNSTECH_EB700
from calibre.devices.sne.driver import SNE
from calibre.devices.misc import PALMPRE, AVANT, SWEEX, PDNOVEL, KOGAN, \
GEMEI, VELOCITYMICRO, PDNOVEL_KOBO, Q600, LUMIREAD
GEMEI, VELOCITYMICRO, PDNOVEL_KOBO, Q600, LUMIREAD, ALURATEK_COLOR, \
TREKSTOR, EEEREADER
from calibre.devices.folder_device.driver import FOLDER_DEVICE_FOR_CONFIG
from calibre.devices.kobo.driver import KOBO
from calibre.devices.bambook.driver import BAMBOOK
from calibre.ebooks.metadata.fetch import GoogleBooks, ISBNDB, Amazon, \
LibraryThing
@ -545,9 +548,7 @@ plugins += [
JETBOOK_MINI,
MIBUK,
SHINEBOOK,
POCKETBOOK360,
POCKETBOOK301,
POCKETBOOK602,
POCKETBOOK360, POCKETBOOK301, POCKETBOOK602, POCKETBOOK701,
KINDLE,
KINDLE2,
KINDLE_DX,
@ -580,7 +581,7 @@ plugins += [
ELONEX,
TECLAST_K3,
NEWSMY,
PICO,
PICO, SUNSTECH_EB700,
IPAPYRUS,
SOVOS,
EDGE,
@ -601,6 +602,10 @@ plugins += [
VELOCITYMICRO,
PDNOVEL_KOBO,
LUMIREAD,
ALURATEK_COLOR,
BAMBOOK,
TREKSTOR,
EEEREADER,
ITUNES,
]
plugins += [x for x in list(locals().values()) if isinstance(x, type) and \

View File

@ -696,8 +696,9 @@ class BambookOutput(OutputProfile):
short_name = 'bambook'
description = _('This profile is intended for the Sanda Bambook.')
# Screen size is a best guess
screen_size = (600, 800)
# Screen size is for full screen display
screen_size = (580, 780)
# Comic size is for normal display
comic_screen_size = (540, 700)
dpi = 168.451
fbase = 12

View File

@ -23,6 +23,12 @@ Run an embedded python interpreter.
help='Debug the specified device driver.')
parser.add_option('-g', '--gui', default=False, action='store_true',
help='Run the GUI',)
parser.add_option('--gui-debug', default=None,
help='Run the GUI with a debug console, logging to the'
' specified path',)
parser.add_option('--show-gui-debug', default=None,
help='Display the specified log file.',)
parser.add_option('-w', '--viewer', default=False, action='store_true',
help='Run the ebook viewer',)
parser.add_option('--paths', default=False, action='store_true',
@ -135,7 +141,28 @@ def add_simple_plugin(path_to_plugin):
os.chdir(odir)
shutil.rmtree(tdir)
def run_debug_gui(logpath):
import time, platform
time.sleep(3) # Give previous GUI time to shutdown fully and release locks
from calibre.constants import __appname__, __version__, isosx
print __appname__, _('Debug log')
print __appname__, __version__
print platform.platform()
print platform.system()
print platform.system_alias(platform.system(), platform.release(),
platform.version())
print 'Python', platform.python_version()
try:
if iswindows:
print 'Windows:', platform.win32_ver()
elif isosx:
print 'OSX:', platform.mac_ver()
else:
print 'Linux:', platform.linux_distribution()
except:
pass
from calibre.gui2.main import main
main(['__CALIBRE_GUI_DEBUG__', logpath])
def main(args=sys.argv):
from calibre.constants import debug
@ -154,6 +181,20 @@ def main(args=sys.argv):
if opts.gui:
from calibre.gui2.main import main
main(['calibre'])
elif opts.gui_debug is not None:
run_debug_gui(opts.gui_debug)
elif opts.show_gui_debug:
import time, re
time.sleep(1)
from calibre.gui2 import open_local_file
if iswindows:
with open(opts.show_gui_debug, 'r+b') as f:
raw = f.read()
raw = re.sub('(?<!\r)\n', '\r\n', raw)
f.seek(0)
f.truncate()
f.write(raw)
open_local_file(opts.show_gui_debug)
elif opts.viewer:
from calibre.gui2.viewer.main import main
vargs = ['ebook-viewer', '--debug-javascript']

View File

@ -24,11 +24,11 @@ class ANDROID(USBMS):
0xc92 : [0x100], 0xc97: [0x226]},
# Eken
0x040d : { 0x8510 : [0x0001] },
0x040d : { 0x8510 : [0x0001], 0x0851 : [0x1] },
# Motorola
0x22b8 : { 0x41d9 : [0x216], 0x2d67 : [0x100], 0x41db : [0x216],
0x4285 : [0x216]},
0x4285 : [0x216], 0x42a3 : [0x216] },
# Sony Ericsson
0xfce : { 0xd12e : [0x0100]},
@ -49,8 +49,9 @@ class ANDROID(USBMS):
# Dell
0x413c : { 0xb007 : [0x0100, 0x0224]},
# Eken?
0x040d : { 0x0851 : [0x0001]},
# LG
0x1004 : { 0x61cc : [0x100] },
}
EBOOK_DIR_MAIN = ['eBooks/import', 'wordplayer/calibretransfer', 'Books']
EXTRA_CUSTOMIZATION_MESSAGE = _('Comma separated list of directories to '
@ -59,13 +60,13 @@ class ANDROID(USBMS):
EXTRA_CUSTOMIZATION_DEFAULT = ', '.join(EBOOK_DIR_MAIN)
VENDOR_NAME = ['HTC', 'MOTOROLA', 'GOOGLE_', 'ANDROID', 'ACER',
'GT-I5700', 'SAMSUNG', 'DELL', 'LINUX']
'GT-I5700', 'SAMSUNG', 'DELL', 'LINUX', 'GOOGLE']
WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
'__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID',
'SCH-I500_CARD', 'SPH-D700_CARD', 'MB810']
'SCH-I500_CARD', 'SPH-D700_CARD', 'MB810', 'GT-P1000', 'DESIRE']
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID']
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD']
OSX_MAIN_MEM = 'HTC Android Phone Media'

View File

@ -688,7 +688,7 @@ class ITUNES(DriverBase):
if DEBUG:
self.log.info("ITUNES:get_device_information()")
return ('iDevice','hw v1.0','sw v1.0', 'mime type normally goes here')
return (self.sources['iPod'],'hw v1.0','sw v1.0', 'mime type normally goes here')
def get_file(self, path, outfile, end_session=True):
'''
@ -2775,10 +2775,19 @@ class ITUNES(DriverBase):
if lb_added:
lb_added.SortName = "%s %s" % (self.title_sorter(metadata_x.series), series_index)
lb_added.EpisodeID = metadata_x.series
try:
lb_added.TrackNumber = metadata_x.series_index
except:
if DEBUG:
self.log.warning(" iTunes automation interface reported an error"
" setting TrackNumber in iTunes")
try:
lb_added.EpisodeNumber = metadata_x.series_index
except:
pass
if DEBUG:
self.log.warning(" iTunes automation interface reported an error"
" setting EpisodeNumber in iTunes")
# If no plugboard transform applied to tags, change the Genre/Category to Series
if metadata.tags == metadata_x.tags:
@ -2792,6 +2801,13 @@ class ITUNES(DriverBase):
if db_added:
db_added.SortName = "%s %s" % (self.title_sorter(metadata_x.series), series_index)
db_added.EpisodeID = metadata_x.series
try:
db_added.TrackNumber = metadata_x.series_index
except:
if DEBUG:
self.log.warning(" iTunes automation interface reported an error"
" setting TrackNumber on iDevice")
try:
db_added.EpisodeNumber = metadata_x.series_index
except:

View File

View File

@ -0,0 +1,477 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2010, Li Fanxi <lifanxi at freemindworld.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Sanda's Bambook
'''
import time, os, hashlib
from itertools import cycle
from calibre.devices.interface import DevicePlugin
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.devices.bambook.libbambookcore import Bambook, text_encoding, CONN_CONNECTED, is_bambook_lib_ready
from calibre.devices.usbms.books import Book, BookList
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.ptempfile import TemporaryDirectory, TemporaryFile
from calibre.constants import __appname__, __version__
from calibre.devices.errors import OpenFeedback
class BAMBOOK(DeviceConfig, DevicePlugin):
    """
    Driver for Sanda's Bambook eBook reader.

    All device communication goes through the :class:`Bambook` wrapper
    around the vendor's libBambookCore library. Books live on the device
    as SNB packages identified by a guid; calibre's metadata listing is
    persisted on the device inside a stub SNB book (METADATA_FILE_GUID).
    """
    name = 'Bambook Device Interface'
    description = _('Communicate with the Sanda Bambook eBook reader.')
    author = _('Li Fanxi')
    supported_platforms = ['windows', 'linux', 'osx']
    log_packets = False

    booklist_class = BookList
    book_class = Book

    # Only the device's native SNB format is supported
    FORMATS = [ "snb" ]
    VENDOR_ID = 0x230b
    PRODUCT_ID = 0x0001
    BCD = None
    CAN_SET_METADATA = False
    THUMBNAIL_HEIGHT = 155
    icon = I("devices/bambook.png")

#    OPEN_FEEDBACK_MESSAGE = _(
#        'Connecting to Bambook device, please wait ...')

    BACKLOADING_ERROR_MESSAGE = _(
        'Unable to add book to library directly from Bambook. '
        'Please save the book to disk and add the file to library from disk.')

    # Name of the metadata cache file stored inside the stub metadata book
    METADATA_CACHE = '.calibre.bambook'
    # Guid of the stub book used to hold calibre metadata on the device
    METADATA_FILE_GUID = 'calibremetadata.snb'

    # Connection wrapper; None while not connected
    bambook = None

    def reset(self, key='-1', log_packets=False, report_progress=None,
            detected_device=None) :
        # A reset is implemented as a (re)open of the connection
        self.open()

    def open(self):
        # Make sure the Bambook library is ready
        if not is_bambook_lib_ready():
            raise OpenFeedback(_("Unable to connect to Bambook, you need to install Bambook library first."))

        # Disconnect first if connected
        self.eject()
        # Connect
        self.bambook = Bambook()
        self.bambook.Connect()
        if self.bambook.GetState() != CONN_CONNECTED:
            self.bambook = None
            raise Exception(_("Unable to connect to Bambook."))

    def eject(self):
        if self.bambook:
            self.bambook.Disconnect()
            self.bambook = None

    def post_yank_cleanup(self):
        self.eject()

    def set_progress_reporter(self, report_progress):
        '''
        :param report_progress: Function that is called with a % progress
                                (number between 0 and 100) for various tasks
                                If it is called with -1 that means that the
                                task does not have any progress information
        '''
        self.report_progress = report_progress

    def get_device_information(self, end_session=True):
        """
        Ask device for device information. See L{DeviceInfoQuery}.
        :return: (device name, device version, software version on device, mime type)
        """
        if self.bambook:
            deviceInfo = self.bambook.GetDeviceInfo()
            return (_("Bambook"), "SD928", deviceInfo.firmwareVersion, "MimeType")

    def card_prefix(self, end_session=True):
        '''
        Return a 2 element list of the prefix to paths on the cards.
        If no card is present None is set for the card's prefix.
        E.G.
        ('/place', '/place2')
        (None, 'place2')
        ('place', None)
        (None, None)
        '''
        # The Bambook has no storage cards
        return (None, None)

    def total_space(self, end_session=True):
        """
        Get total space available on the mountpoints:
            1. Main memory
            2. Memory Card A
            3. Memory Card B
        :return: A 3 element list with total space in bytes of (1, 2, 3). If a
                 particular device doesn't have any of these locations it should return 0.
        """
        deviceInfo = self.bambook.GetDeviceInfo()
        # deviceVolume is presumably reported in KB -- converted to bytes here
        return (deviceInfo.deviceVolume * 1024, 0, 0)

    def free_space(self, end_session=True):
        """
        Get free space available on the mountpoints:
            1. Main memory
            2. Card A
            3. Card B
        :return: A 3 element list with free space in bytes of (1, 2, 3). If a
                 particular device doesn't have any of these locations it should return -1.
        """
        deviceInfo = self.bambook.GetDeviceInfo()
        # spareVolume is presumably reported in KB -- converted to bytes here
        return (deviceInfo.spareVolume * 1024, -1, -1)

    def books(self, oncard=None, end_session=True):
        """
        Return a list of ebooks on the device.
        :param oncard: If 'carda' or 'cardb' return a list of ebooks on the
                       specific storage card, otherwise return list of ebooks
                       in main memory of device. If a card is specified and no
                       books are on the card return empty list.
        :return: A BookList.
        """
        # Bambook has no memory card
        if oncard:
            return self.booklist_class(None, None, None)
        # Get metadata cache
        prefix = ''
        booklist = self.booklist_class(oncard, prefix, self.settings)
        need_sync = self.parse_metadata_cache(booklist)
        # Get book list from device
        devicebooks = self.bambook.GetBookList()
        books = []
        for book in devicebooks:
            # Skip the stub book that only carries calibre metadata
            if book.bookGuid == self.METADATA_FILE_GUID:
                continue
            b = self.book_class('', book.bookGuid)
            b.title = book.bookName.decode(text_encoding)
            b.authors = [ book.bookAuthor.decode(text_encoding) ]
            b.size = 0
            # NOTE(review): 'datatime' looks like a typo for 'datetime' --
            # confirm against the Book class attribute name
            b.datatime = time.gmtime()
            b.lpath = book.bookGuid
            b.thumbnail = None
            b.tags = None
            b.comments = book.bookAbstract.decode(text_encoding)
            books.append(b)
        # make a dict cache of paths so the lookup in the loop below is faster.
        bl_cache = {}
        for idx, b in enumerate(booklist):
            bl_cache[b.lpath] = idx

        def update_booklist(book, prefix):
            changed = False
            try:
                idx = bl_cache.get(book.lpath, None)
                if idx is not None:
                    # Known book: mark as seen and refresh its metadata
                    bl_cache[book.lpath] = None
                    if self.update_metadata_item(book, booklist[idx]):
                        changed = True
                else:
                    if booklist.add_book(book,
                                  replace_metadata=False):
                        changed = True
            except: # Probably a filename encoding error
                import traceback
                traceback.print_exc()
            return changed

        # Check each book on device whether it has a correspondig item
        # in metadata cache. If not, add it to cache.
        for i, book in enumerate(books):
            self.report_progress(i/float(len(books)), _('Getting list of books on device...'))
            changed = update_booklist(book, prefix)
            if changed:
                need_sync = True

        # Remove books that are no longer in the Bambook. Cache contains
        # indices into the booklist if book not in filesystem, None otherwise
        # Do the operation in reverse order so indices remain valid
        for idx in sorted(bl_cache.itervalues(), reverse=True):
            if idx is not None:
                need_sync = True
                del booklist[idx]

        if need_sync:
            self.sync_booklists((booklist, None, None))

        self.report_progress(1.0, _('Getting list of books on device...'))
        return booklist

    def upload_books(self, files, names, on_card=None, end_session=True,
                     metadata=None):
        '''
        Upload a list of books to the device. If a file already
        exists on the device, it should be replaced.
        This method should raise a :class:`FreeSpaceError` if there is not enough
        free space on the device. The text of the FreeSpaceError must contain the
        word "card" if ``on_card`` is not None otherwise it must contain the word "memory".
        :param files: A list of paths and/or file-like objects. If they are paths and
                      the paths point to temporary files, they may have an additional
                      attribute, original_file_path pointing to the originals. They may have
                      another optional attribute, deleted_after_upload which if True means
                      that the file pointed to by original_file_path will be deleted after
                      being uploaded to the device.
        :param names: A list of file names that the books should have
                      once uploaded to the device. len(names) == len(files)
        :param metadata: If not None, it is a list of :class:`Metadata` objects.
                         The idea is to use the metadata to determine where on the device to
                         put the book. len(metadata) == len(files). Apart from the regular
                         cover (path to cover), there may also be a thumbnail attribute, which should
                         be used in preference. The thumbnail attribute is of the form
                         (width, height, cover_data as jpeg).
        :return: A list of 3-element tuples. The list is meant to be passed
                 to :meth:`add_books_to_metadata`.
        '''
        self.report_progress(0, _('Transferring books to device...'))
        paths = []
        if self.bambook:
            for (i, f) in enumerate(files):
                self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
                # Only file paths are handled; file-like objects are skipped
                if not hasattr(f, 'read'):
                    if self.bambook.VerifySNB(f):
                        # Guid is derived from the calibre uuid, presumably so
                        # that re-sending the same book replaces it on device
                        guid = self.bambook.SendFile(f, self.get_guid(metadata[i].uuid))
                        if guid:
                            paths.append(guid)
                        else:
                            print "Send fail"
                    else:
                        print "book invalid"
        ret = zip(paths, cycle([on_card]))
        self.report_progress(1.0, _('Transferring books to device...'))
        return ret

    def add_books_to_metadata(self, locations, metadata, booklists):
        metadata = iter(metadata)
        for i, location in enumerate(locations):
            self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
            info = metadata.next()
            # Extract the correct prefix from the pathname. To do this correctly,
            # we must ensure that both the prefix and the path are normalized
            # so that the comparison will work. Book's __init__ will fix up
            # lpath, so we don't need to worry about that here.
            book = self.book_class('', location[0], other=info)
            if book.size is None:
                book.size = 0
            b = booklists[0].add_book(book, replace_metadata=True)
            if b:
                b._new_book = True
        self.report_progress(1.0, _('Adding books to device metadata listing...'))

    def delete_books(self, paths, end_session=True):
        '''
        Delete books at paths on device.
        '''
        if self.bambook:
            for i, path in enumerate(paths):
                self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
                self.bambook.DeleteFile(path)
            self.report_progress(1.0, _('Removing books from device...'))

    def remove_books_from_metadata(self, paths, booklists):
        '''
        Remove books from the metadata list. This function must not communicate
        with the device.
        :param paths: paths to books on the device.
        :param booklists: A tuple containing the result of calls to
                          (:meth:`books(oncard=None)`,
                          :meth:`books(oncard='carda')`,
                          :meth`books(oncard='cardb')`).
        '''
        for i, path in enumerate(paths):
            self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
            for bl in booklists:
                for book in bl:
                    if book.lpath == path:
                        bl.remove_book(book)
        self.report_progress(1.0, _('Removing books from device metadata listing...'))

    def sync_booklists(self, booklists, end_session=True):
        '''
        Update metadata on device.
        :param booklists: A tuple containing the result of calls to
                          (:meth:`books(oncard=None)`,
                          :meth:`books(oncard='carda')`,
                          :meth`books(oncard='cardb')`).
        '''
        if not self.bambook:
            return
        json_codec = JsonCodec()
        # Create stub virtual book for sync info: an SNB package whose
        # snbc/ member carries the JSON-encoded booklist
        with TemporaryDirectory() as tdir:
            snbcdir = os.path.join(tdir, 'snbc')
            snbfdir = os.path.join(tdir, 'snbf')
            os.mkdir(snbcdir)
            os.mkdir(snbfdir)
            f = open(os.path.join(snbfdir, 'book.snbf'), 'wb')
            f.write('''<book-snbf version="1.0">
<head>
<name>calibre同步信息</name>
<author>calibre</author>
<language>ZH-CN</language>
<rights/>
<publisher>calibre</publisher>
<generator>''' + __appname__ + ' ' + __version__ + '''</generator>
<created/>
<abstract></abstract>
<cover/>
</head>
</book-snbf>
''')
            f.close()
            f = open(os.path.join(snbfdir, 'toc.snbf'), 'wb')
            f.write('''<toc-snbf>
<head>
<chapters>0</chapters>
</head>
<body>
</body>
</toc-snbf>
''');
            f.close()
            cache_name = os.path.join(snbcdir, self.METADATA_CACHE)
            with open(cache_name, 'wb') as f:
                json_codec.encode_to_file(f, booklists[0])
            with TemporaryFile('.snb') as f:
                if self.bambook.PackageSNB(f, tdir):
                    if not self.bambook.SendFile(f, self.METADATA_FILE_GUID):
                        print "Upload failed"
                else:
                    print "Package failed"

        # Clear the _new_book indication, as we are supposed to be done with
        # adding books at this point
        for blist in booklists:
            if blist is not None:
                for book in blist:
                    book._new_book = False

        self.report_progress(1.0, _('Sending metadata to device...'))

    def get_file(self, path, outfile, end_session=True):
        '''
        Read the file at ``path`` on the device and write it to outfile.
        :param outfile: file object like ``sys.stdout`` or the result of an
                       :func:`open` call.
        '''
        if self.bambook:
            with TemporaryDirectory() as tdir:
                if self.bambook.GetFile(path, tdir):
                    filepath = os.path.join(tdir, path)
                    f = file(filepath, 'rb')
                    outfile.write(f.read())
                    f.close()
                else:
                    print "Unable to get file from Bambook:", path

    @classmethod
    def config_widget(cls):
        '''
        Should return a QWidget. The QWidget contains the settings for the device interface
        '''
        from calibre.gui2.device_drivers.configwidget import ConfigWidget
        cw = ConfigWidget(cls.settings(), cls.FORMATS, cls.SUPPORTS_SUB_DIRS,
            cls.MUST_READ_METADATA, cls.SUPPORTS_USE_AUTHOR_SORT,
            cls.EXTRA_CUSTOMIZATION_MESSAGE)
        # Turn off the Save template
        cw.opt_save_template.setVisible(False)
        cw.label.setVisible(False)
        # Repurpose the metadata checkbox
        cw.opt_read_metadata.setVisible(False)
        # Repurpose the use_subdirs checkbox
        cw.opt_use_subdirs.setVisible(False)
        return cw

#    @classmethod
#    def save_settings(cls, settings_widget):
#        '''
#        Should save settings to disk. Takes the widget created in
#        :meth:`config_widget` and saves all settings to disk.
#        '''
#        raise NotImplementedError()

#    @classmethod
#    def settings(cls):
#        '''
#        Should return an opts object. The opts object should have at least one attribute
#        `format_map` which is an ordered list of formats for the device.
#        '''
#        raise NotImplementedError()

    def parse_metadata_cache(self, bl):
        # Fetch the stub metadata book from the device and decode the
        # cached booklist from it; returns True when a re-sync is needed
        need_sync = True
        if not self.bambook:
            return need_sync
        # Get the metadata virtual book from Bambook
        with TemporaryDirectory() as tdir:
            if self.bambook.GetFile(self.METADATA_FILE_GUID, tdir):
                cache_name = os.path.join(tdir, self.METADATA_CACHE)
                if self.bambook.ExtractSNBContent(os.path.join(tdir, self.METADATA_FILE_GUID),
                        'snbc/' + self.METADATA_CACHE,
                        cache_name):
                    json_codec = JsonCodec()
                    if os.access(cache_name, os.R_OK):
                        try:
                            with open(cache_name, 'rb') as f:
                                json_codec.decode_from_file(f, bl, self.book_class, '')
                                need_sync = False
                        except:
                            import traceback
                            traceback.print_exc()
                            bl = []
        return need_sync

    @classmethod
    def update_metadata_item(cls, book, blb):
        # Currently, we do not have enough information
        # from Bambook SDK to judge whether a book has
        # been changed, we assume all books has been
        # changed.
        changed = True
        # if book.bookName.decode(text_encoding) != blb.title:
        #     changed = True
        # if book.bookAuthor.decode(text_encoding) != blb.authors[0]:
        #     changed = True
        # if book.bookAbstract.decode(text_encoding) != blb.comments:
        #     changed = True
        return changed

    @staticmethod
    def get_guid(uuid):
        # Device guids are 15 hex chars + '.snb'; derived deterministically
        # from the calibre uuid so the same book always maps to the same guid
        guid = hashlib.md5(uuid).hexdigest()[0:15] + ".snb"
        return guid

View File

@ -0,0 +1,530 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2010, Li Fanxi <lifanxi at freemindworld.com>'
__docformat__ = 'restructuredtext en'
'''
Sanda library wrapper
'''
import ctypes, uuid, hashlib, os, sys
from threading import Event, Lock
from calibre.constants import iswindows, islinux, isosx
from calibre import load_library
# Load the vendor's BambookCore shared library; lib_handle stays None when
# it is not installed (checked later via is_bambook_lib_ready()).
try:
    _lib_name = 'libBambookCore'
    cdll = ctypes.cdll
    if iswindows:
        _lib_name = 'BambookCore'
    if hasattr(sys, 'frozen') and iswindows:
        # Frozen windows build: the DLL ships in DLLs/ next to the executable
        lp = os.path.join(os.path.dirname(sys.executable), 'DLLs', 'BambookCore.dll')
        lib_handle = cdll.LoadLibrary(lp)
    elif hasattr(sys, 'frozen_path'):
        # Frozen linux build: shared object ships under lib/
        lp = os.path.join(sys.frozen_path, 'lib', 'libBambookCore.so')
        lib_handle = cdll.LoadLibrary(lp)
    else:
        lib_handle = load_library(_lib_name, cdll)
except:
    lib_handle = None

# Encoding used when decoding strings received from the SDK
if iswindows:
    text_encoding = 'mbcs'
elif islinux:
    text_encoding = 'utf-8'
elif isosx:
    text_encoding = 'utf-8'
def is_bambook_lib_ready():
    # True iff the vendor libBambookCore shared library was loaded.
    # Identity comparison with None (PEP 8) instead of the original `!=`.
    return lib_handle is not None
# Constants
DEFAULT_BAMBOOK_IP = '192.168.250.2'
BAMBOOK_SDK_VERSION = 0x00090000

# Result codes returned by the libBambookCore C API
BR_SUCC = 0                 # Operation succeeded
BR_FAIL = 1001              # Operation failed
BR_NOT_IMPL = 1002          # Feature not implemented yet
BR_DISCONNECTED = 1003      # Connection to the device has been lost
BR_PARAM_ERROR = 1004       # Bad argument passed to a function
BR_TIMEOUT = 1005           # Operation or communication timed out
BR_INVALID_HANDLE = 1006    # The supplied handle is invalid
BR_INVALID_FILE = 1007      # File does not exist or has an invalid format
BR_INVALID_DIR = 1008       # Directory does not exist
BR_BUSY = 1010              # Device is busy, another operation is unfinished
BR_EOF = 1011               # File or operation has ended
BR_IO_ERROR = 1012          # File read/write failed
BR_FILE_NOT_INSIDE = 1013   # The named file is not inside the package

# Connection states (see BambookGetConnectStatus)
CONN_CONNECTED = 0          # Connected
CONN_DISCONNECTED = 1       # Not connected, or connection has dropped
CONN_CONNECTING = 2         # Connection in progress
CONN_WAIT_FOR_AUTH = 3      # Connected, waiting for authentication (not yet implemented)

# Transfer states reported to the transfer callback
TRANS_STATUS_TRANS = 0      # Transfer in progress
TRANS_STATUS_DONE = 1       # Transfer completed
TRANS_STATUS_ERR = 2        # Transfer failed

# Hardware key codes accepted by BambookKeyPress()
BBKeyNum0 = 0
BBKeyNum1 = 1
BBKeyNum2 = 2
BBKeyNum3 = 3
BBKeyNum4 = 4
BBKeyNum5 = 5
BBKeyNum6 = 6
BBKeyNum7 = 7
BBKeyNum8 = 8
BBKeyNum9 = 9
BBKeyStar = 10
BBKeyCross = 11
BBKeyUp = 12
BBKeyDown = 13
BBKeyLeft = 14
BBKeyRight = 15
BBKeyPageUp = 16
BBKeyPageDown = 17
BBKeyOK = 18
BBKeyESC = 19
BBKeyBookshelf = 20
BBKeyStore = 21
BBKeyTTS = 22
BBKeyMenu = 23
BBKeyInteract = 24
class DeviceInfo(ctypes.Structure):
    '''ctypes mirror of the C DeviceInfo record returned by the SDK.

    cbSize is initialised to the struct size, as the C API expects.
    '''
    _fields_ = [
        ('cbSize', ctypes.c_int),
        ('sn', ctypes.c_char * 20),
        ('firmwareVersion', ctypes.c_char * 20),
        ('deviceVolume', ctypes.c_int),
        ('spareVolume', ctypes.c_int),
    ]

    def __init__(self):
        self.cbSize = ctypes.sizeof(self)
class PrivBookInfo(ctypes.Structure):
    '''ctypes mirror of the C PrivBookInfo record describing one book.

    cbSize is initialised to the struct size, as the C API expects.
    '''
    _fields_ = [
        ('cbSize', ctypes.c_int),
        ('bookGuid', ctypes.c_char * 20),
        ('bookName', ctypes.c_char * 80),
        ('bookAuthor', ctypes.c_char * 40),
        ('bookAbstract', ctypes.c_char * 256),
    ]

    def __init__(self):
        self.cbSize = ctypes.sizeof(self)

    def Clone(self):
        '''Return an independent copy of this record (field by field).'''
        duplicate = PrivBookInfo()
        for field_name, _field_type in self._fields_:
            setattr(duplicate, field_name, getattr(self, field_name))
        return duplicate
# extern "C"_declspec(dllexport) BB_RESULT BambookConnect(const char* lpszIP, int timeOut, BB_HANDLE* hConn);
def BambookConnect(ip = DEFAULT_BAMBOOK_IP, timeout = 0):
if isinstance(ip, unicode):
ip = ip.encode('ascii')
handle = ctypes.c_void_p(0)
if lib_handle == None:
raise Exception(_('Bambook SDK has not been installed.'))
ret = lib_handle.BambookConnect(ip, timeout, ctypes.byref(handle))
if ret == BR_SUCC:
return handle
else:
return None
# extern "C" _declspec(dllexport) BB_RESULT BambookGetConnectStatus(BB_HANDLE hConn, int* status);
def BambookGetConnectStatus(handle):
status = ctypes.c_int(0)
ret = lib_handle.BambookGetConnectStatus(handle, ctypes.byref(status))
if ret == BR_SUCC:
return status.value
else:
return None
# extern "C" _declspec(dllexport) BB_RESULT BambookDisconnect(BB_HANDLE hConn);
def BambookDisconnect(handle):
ret = lib_handle.BambookDisconnect(handle)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" const char * BambookGetErrorString(BB_RESULT nCode)
def BambookGetErrorString(code):
func = lib_handle.BambookGetErrorString
func.restype = ctypes.c_char_p
return func(code)
# extern "C" BB_RESULT BambookGetSDKVersion(uint32_t * version);
def BambookGetSDKVersion():
version = ctypes.c_int(0)
lib_handle.BambookGetSDKVersion(ctypes.byref(version))
return version.value
# extern "C" BB_RESULT BambookGetDeviceInfo(BB_HANDLE hConn, DeviceInfo* pInfo);
def BambookGetDeviceInfo(handle):
deviceInfo = DeviceInfo()
ret = lib_handle.BambookGetDeviceInfo(handle, ctypes.byref(deviceInfo))
if ret == BR_SUCC:
return deviceInfo
else:
return None
# extern "C" BB_RESULT BambookKeyPress(BB_HANDLE hConn, BambookKey key);
def BambookKeyPress(handle, key):
ret = lib_handle.BambookKeyPress(handle, key)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookGetFirstPrivBookInfo(BB_HANDLE hConn, PrivBookInfo * pInfo);
def BambookGetFirstPrivBookInfo(handle, bookInfo):
bookInfo.contents.cbSize = ctypes.sizeof(bookInfo.contents)
ret = lib_handle.BambookGetFirstPrivBookInfo(handle, bookInfo)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookGetNextPrivBookInfo(BB_HANDLE hConn, PrivBookInfo * pInfo);
def BambookGetNextPrivBookInfo(handle, bookInfo):
bookInfo.contents.cbSize = ctypes.sizeof(bookInfo.contents)
ret = lib_handle.BambookGetNextPrivBookInfo(handle, bookInfo)
if ret == BR_SUCC:
return True
elif ret == BR_EOF:
return False
else:
return False
# extern "C" BB_RESULT BambookDeletePrivBook(BB_HANDLE hConn, const char * lpszBookID);
def BambookDeletePrivBook(handle, guid):
if isinstance(guid, unicode):
guid = guid.encode('ascii')
ret = lib_handle.BambookDeletePrivBook(handle, guid)
if ret == BR_SUCC:
return True
else:
return False
class JobQueue:
    '''Track in-flight transfer jobs keyed by an integer job id.

    Each job maps to a ``(threading.Event, status)`` pair; the event is
    set when the transfer callback reports completion or error. The id
    counter is protected by a lock because the SDK callback may run on a
    different thread than the caller.
    '''

    def __init__(self):
        # State is kept per instance; the original declared ``jobs``,
        # ``maxID`` and ``lock`` as class attributes, which would have
        # been shared between all instances.
        self.jobs = {}
        self.maxID = 0
        self.lock = Lock()

    def NewJob(self):
        '''Allocate a fresh job id in the in-progress state and return it.'''
        with self.lock:
            self.maxID += 1
            maxid = self.maxID
        self.jobs[maxid] = (Event(), TRANS_STATUS_TRANS)
        return maxid

    def FinishJob(self, jobID, status):
        '''Record the final *status* for the job and wake any waiter.'''
        self.jobs[jobID] = (self.jobs[jobID][0], status)
        self.jobs[jobID][0].set()

    def WaitJob(self, jobID):
        '''Block until the job finishes; True iff it completed successfully.'''
        self.jobs[jobID][0].wait()
        return self.jobs[jobID][1] == TRANS_STATUS_DONE

    def DeleteJob(self, jobID):
        '''Forget a finished job.'''
        del self.jobs[jobID]
# The single module-wide job queue shared by all transfers
job = JobQueue()

def BambookTransferCallback(status, progress, userData):
    # Invoked by libBambookCore during a transfer; userData carries the
    # job id allocated by JobQueue.NewJob(). Only terminal states
    # (done at 100%, or error) finish the job.
    if status == TRANS_STATUS_DONE and progress == 100:
        job.FinishJob(userData, status)
    elif status == TRANS_STATUS_ERR:
        job.FinishJob(userData, status)

# C callback signature: void (*)(int status, int progress, int userData)
TransCallback = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.c_int)
bambookTransferCallback = TransCallback(BambookTransferCallback)
# extern "C" BB_RESULT BambookAddPrivBook(BB_HANDLE hConn, const char * pszSnbFile,
# TransCallback pCallbackFunc, intptr_t userData);
def BambookAddPrivBook(handle, filename, callback, userData):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
ret = lib_handle.BambookAddPrivBook(handle, filename, callback, userData)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookReplacePrivBook(BB_HANDLE hConn, const char *
# pszSnbFile, const char * lpszBookID, TransCallback pCallbackFunc, intptr_t userData);
def BambookReplacePrivBook(handle, filename, bookID, callback, userData):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
if isinstance(bookID, unicode):
bookID = bookID.encode('ascii')
ret = lib_handle.BambookReplacePrivBook(handle, filename, bookID, callback, userData)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookFetchPrivBook(BB_HANDLE hConn, const char *
# lpszBookID, const char * lpszFilePath, TransCallback pCallbackFunc, intptr_t userData);
def BambookFetchPrivBook(handle, bookID, filename, callback, userData):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
if isinstance(bookID, unicode):
bookID = bookID.encode('ascii')
ret = lib_handle.BambookFetchPrivBook(handle, bookID, filename, bambookTransferCallback, userData)
if ret == BR_SUCC:
return True
else:
return False
# extern "C" BB_RESULT BambookVerifySnbFile(const char * snbName)
def BambookVerifySnbFile(filename):
if isinstance(filename, unicode):
filename = filename.encode('ascii')
if lib_handle.BambookVerifySnbFile(filename) == BR_SUCC:
return True
else:
return False
# BB_RESULT BambookPackSnbFromDir ( const char * snbName,, const char * rootDir );
def BambookPackSnbFromDir(snbFileName, rootDir):
if isinstance(snbFileName, unicode):
snbFileName = snbFileName.encode('ascii')
if isinstance(rootDir, unicode):
rootDir = rootDir.encode('ascii')
ret = lib_handle.BambookPackSnbFromDir(snbFileName, rootDir)
if ret == BR_SUCC:
return True
else:
return False
# BB_RESULT BambookUnpackFileFromSnb ( const char * snbName,, const char * relativePath, const char * outfname );
def BambookUnpackFileFromSnb(snbFileName, relPath, outFileName):
if isinstance(snbFileName, unicode):
snbFileName = snbFileName.encode('ascii')
if isinstance(relPath, unicode):
relPath = relPath.encode('ascii')
if isinstance(outFileName, unicode):
outFileName = outFileName.encode('ascii')
ret = lib_handle.BambookUnpackFileFromSnb(snbFileName, relPath, outFileName)
if ret == BR_SUCC:
return True
else:
return False
class Bambook:
    '''Object-oriented facade over the module-level libBambookCore wrappers.

    All methods degrade gracefully when no connection is open
    (``self.handle`` is None): they return False/None without touching
    the library.
    '''

    def __init__(self):
        self.handle = None

    def Connect(self, ip = DEFAULT_BAMBOOK_IP, timeout = 10000):
        '''Open a connection; returns True on success.'''
        self.handle = BambookConnect(ip, timeout)
        # BambookConnect returns None on failure and a ctypes handle on
        # success; a NULL handle is falsy, matching the original check
        return bool(self.handle)

    def Disconnect(self):
        '''Close the connection; False when no connection was open.'''
        if self.handle:
            return BambookDisconnect(self.handle)
        return False

    def GetState(self):
        '''Current CONN_* state; CONN_DISCONNECTED when no handle.'''
        if self.handle:
            return BambookGetConnectStatus(self.handle)
        return CONN_DISCONNECTED

    def GetDeviceInfo(self):
        '''DeviceInfo for the connected device, or None.'''
        if self.handle:
            return BambookGetDeviceInfo(self.handle)
        return None

    def SendFile(self, fileName, guid = None):
        '''Upload an SNB file to the device.

        :param guid: guid to store the file under; when falsy a fresh
            pseudo-random guid is generated.
        :return: the guid on success, None when the transfer fails,
            False when not connected.
        '''
        if not self.handle:
            return False
        if not guid:
            # Generate a random guid for a brand-new book. The original
            # duplicated the whole transfer sequence in both branches;
            # once the guid is fixed the code path is identical.
            guid = hashlib.md5(str(uuid.uuid4())).hexdigest()[0:15] + ".snb"
        taskID = job.NewJob()
        try:
            if BambookReplacePrivBook(self.handle, fileName, guid,
                    bambookTransferCallback, taskID):
                if job.WaitJob(taskID):
                    return guid
            return None
        finally:
            # Always reap the job, even if waiting raised
            job.DeleteJob(taskID)

    def GetFile(self, guid, fileName):
        '''Fetch the book identified by *guid* into *fileName*; True on success.'''
        if not self.handle:
            return False
        taskID = job.NewJob()
        try:
            if BambookFetchPrivBook(self.handle, guid, fileName,
                    bambookTransferCallback, taskID):
                return job.WaitJob(taskID)
            return False
        finally:
            job.DeleteJob(taskID)

    def DeleteFile(self, guid):
        '''Delete the book identified by *guid*; False when not connected.'''
        if not self.handle:
            return False
        return BambookDeletePrivBook(self.handle, guid)

    def GetBookList(self):
        '''Return a list of PrivBookInfo clones for every private book,
        or None when not connected.'''
        if not self.handle:
            return None
        books = []
        bookInfo = PrivBookInfo()
        bi = ctypes.pointer(bookInfo)
        more = BambookGetFirstPrivBookInfo(self.handle, bi)
        while more:
            # Clone: the SDK keeps refilling the same structure
            books.append(bi.contents.Clone())
            more = BambookGetNextPrivBookInfo(self.handle, bi)
        return books

    @staticmethod
    def GetSDKVersion():
        return BambookGetSDKVersion()

    @staticmethod
    def VerifySNB(fileName):
        return BambookVerifySnbFile(fileName)

    @staticmethod
    def ExtractSNBContent(fileName, relPath, path):
        '''Extract the single member *relPath* out of an SNB package.'''
        return BambookUnpackFileFromSnb(fileName, relPath, path)

    @staticmethod
    def ExtractSNB(fileName, path):
        '''Extract the two snbf metadata members of an SNB package into *path*.'''
        if not BambookUnpackFileFromSnb(fileName, 'snbf/book.snbf', path + '/snbf/book.snbf'):
            return False
        return BambookUnpackFileFromSnb(fileName, 'snbf/toc.snbf', path + '/snbf/toc.snbf')

    @staticmethod
    def PackageSNB(fileName, path):
        '''Pack the directory tree at *path* into the SNB file *fileName*.'''
        return BambookPackSnbFromDir(fileName, path)
# Tiny result reporters for the manual smoke test below
def passed():
    print "> Pass"

def failed():
    print "> Failed"
if __name__ == "__main__":
print "Bambook SDK Unit Test"
bb = Bambook()
print "Disconnect State"
if bb.GetState() == CONN_DISCONNECTED:
passed()
else:
failed()
print "Get SDK Version"
if bb.GetSDKVersion() == BAMBOOK_SDK_VERSION:
passed()
else:
failed()
print "Verify good SNB File"
if bb.VerifySNB(u'/tmp/f8268e6c1f4e78c.snb'):
passed()
else:
failed()
print "Verify bad SNB File"
if not bb.VerifySNB('./libwrapper.py'):
passed()
else:
failed()
print "Extract SNB File"
if bb.ExtractSNB('./test.snb', '/tmp/test'):
passed()
else:
failed()
print "Packet SNB File"
if bb.PackageSNB('/tmp/tmp.snb', '/tmp/test') and bb.VerifySNB('/tmp/tmp.snb'):
passed()
else:
failed()
print "Connect to Bambook"
if bb.Connect('192.168.250.2', 10000) and bb.GetState() == CONN_CONNECTED:
passed()
else:
failed()
print "Get Bambook Info"
devInfo = bb.GetDeviceInfo()
if devInfo:
# print "Info Size: ", devInfo.cbSize
# print "SN: ", devInfo.sn
# print "Firmware: ", devInfo.firmwareVersion
# print "Capacity: ", devInfo.deviceVolume
# print "Free: ", devInfo.spareVolume
if devInfo.cbSize == 52 and devInfo.deviceVolume == 1714232:
passed()
else:
failed()
print "Send file"
if bb.SendFile('/tmp/tmp.snb'):
passed()
else:
failed()
print "Get book list"
books = bb.GetBookList()
if len(books) > 10:
passed()
else:
failed()
print "Get book"
if bb.GetFile('f8268e6c1f4e78c.snb', '/tmp') and bb.VerifySNB('/tmp/f8268e6c1f4e78c.snb'):
passed()
else:
failed()
print "Disconnect"
if bb.Disconnect():
passed()
else:
failed()

View File

@ -65,6 +65,7 @@ class ORIZON(CYBOOK):
BCD = [0x319]
VENDOR_NAME = ['BOOKEEN', 'LINUX']
WINDOWS_MAIN_MEM = re.compile(r'(CYBOOK_ORIZON__-FD)|(FILE-STOR_GADGET)')
WINDOWS_CARD_A_MEM = re.compile('(CYBOOK_ORIZON__-SD)|(FILE-STOR_GADGET)')

View File

@ -230,7 +230,7 @@ class POCKETBOOK301(USBMS):
class POCKETBOOK602(USBMS):
name = 'PocketBook Pro 602/902 Device Interface'
description = _('Communicate with the PocketBook 602 reader.')
description = _('Communicate with the PocketBook 602/603/902/903 reader.')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm',
@ -244,5 +244,34 @@ class POCKETBOOK602(USBMS):
BCD = [0x0324]
VENDOR_NAME = ''
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['PB602', 'PB902']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['PB602', 'PB603', 'PB902', 'PB903']
class POCKETBOOK701(USBMS):
    # USB mass-storage driver for the PocketBook 701

    name = 'PocketBook 701 Device Interface'
    description = _('Communicate with the PocketBook 701')
    author = _('Kovid Goyal')
    supported_platforms = ['windows', 'osx', 'linux']
    FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm',
            'doc', 'tcr', 'txt']

    EBOOK_DIR_MAIN = 'books'
    SUPPORTS_SUB_DIRS = True

    VENDOR_ID = [0x18d1]
    PRODUCT_ID = [0xa004]
    BCD = [0x0224]

    # The device identifies itself with Android-style USB strings
    VENDOR_NAME = 'ANDROID'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = '__UMS_COMPOSITE'

    def windows_sort_drives(self, drives):
        # Swap main memory and card when both are present -- presumably the
        # windows enumeration order is inverted for this device (confirm)
        if len(drives) < 2: return drives
        main = drives.get('main', None)
        carda = drives.get('carda', None)
        if main and carda:
            drives['main'] = carda
            drives['carda'] = main
        return drives

View File

@ -36,6 +36,11 @@ class UserFeedback(DeviceError):
self.details = details
self.msg = msg
class OpenFeedback(DeviceError):
    """Raised by a device driver's open() to display a message to the user."""

    def __init__(self, msg):
        # The GUI shows feedback_msg to the user
        self.feedback_msg = msg
        DeviceError.__init__(self, msg)
class DeviceBusy(ProtocolError):
""" Raised when device is busy """
def __init__(self, uerr=""):

View File

@ -216,6 +216,9 @@ class DevicePlugin(Plugin):
an implementation of
this function that should serve as a good example for USB Mass storage
devices.
This method can raise an OpenFeedback exception to display a message to
the user.
'''
raise NotImplementedError()

View File

@ -96,7 +96,7 @@ class KOBO(USBMS):
for idx,b in enumerate(bl):
bl_cache[b.lpath] = idx
def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus):
def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType):
changed = False
# if path_to_ext(path) in self.FORMATS:
try:
@ -124,7 +124,7 @@ class KOBO(USBMS):
#print "Image name Normalized: " + imagename
if imagename is not None:
bl[idx].thumbnail = ImageWrapper(imagename)
if (ContentType != '6'and self.has_kepubs == False) or (self.has_kepubs == True):
if (ContentType != '6' and MimeType != 'Shortcover'):
if self.update_metadata_item(bl[idx]):
# print 'update_metadata_item returned true'
changed = True
@ -132,7 +132,7 @@ class KOBO(USBMS):
playlist_map[lpath] not in bl[idx].device_collections:
bl[idx].device_collections.append(playlist_map[lpath])
else:
if ContentType == '6' and self.has_kepubs == False:
if ContentType == '6' and MimeType == 'Shortcover':
book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
else:
try:
@ -177,15 +177,15 @@ class KOBO(USBMS):
for i, row in enumerate(cursor):
# self.report_progress((i+1) / float(numrows), _('Getting list of books on device...'))
path = self.path_from_contentid(row[3], row[5], oncard)
path = self.path_from_contentid(row[3], row[5], row[4], oncard)
mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/epub+zip'
# debug_print("mime:", mime)
if oncard != 'carda' and oncard != 'cardb' and not row[3].startswith("file:///mnt/sd/"):
changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7])
changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4])
# print "shortbook: " + path
elif oncard == 'carda' and row[3].startswith("file:///mnt/sd/"):
changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7])
changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4])
if changed:
need_sync = True
@ -363,7 +363,8 @@ class KOBO(USBMS):
def contentid_from_path(self, path, ContentType):
if ContentType == 6:
if self.has_kepubs == False:
extension = os.path.splitext(path)[1]
if extension == '.kobo':
ContentID = os.path.splitext(path)[0]
# Remove the prefix on the file. it could be either
ContentID = ContentID.replace(self._main_prefix, '')
@ -411,7 +412,7 @@ class KOBO(USBMS):
ContentType = 999 # Yet another hack: to get around Kobo changing how ContentID is stored
return ContentType
def path_from_contentid(self, ContentID, ContentType, oncard):
def path_from_contentid(self, ContentID, ContentType, MimeType, oncard):
path = ContentID
if oncard == 'cardb':
@ -420,13 +421,13 @@ class KOBO(USBMS):
path = path.replace("file:///mnt/sd/", self._card_a_prefix)
# print "SD Card: " + path
else:
if ContentType == "6" and self.has_kepubs == False:
if ContentType == "6" and MimeType == 'Shortcover':
# This is a hack as the kobo files do not exist
# but the path is required to make a unique id
# for calibre's reference
path = self._main_prefix + path + '.kobo'
# print "Path: " + path
elif (ContentType == "6" or ContentType == "10") and self.has_kepubs == True:
elif (ContentType == "6" or ContentType == "10") and MimeType == 'application/x-kobo-epub+zip':
path = self._main_prefix + '.kobo/kepub/' + path
# print "Internal: " + path
else:

View File

@ -62,9 +62,9 @@ class SWEEX(USBMS):
# Ordered list of supported formats
FORMATS = ['epub', 'prc', 'fb2', 'html', 'rtf', 'chm', 'pdf', 'txt']
VENDOR_ID = [0x0525]
PRODUCT_ID = [0xa4a5]
BCD = [0x0319]
VENDOR_ID = [0x0525, 0x177f]
PRODUCT_ID = [0xa4a5, 0x300]
BCD = [0x0319, 0x110]
VENDOR_NAME = 'SWEEX'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EBOOKREADER'
@ -104,7 +104,7 @@ class PDNOVEL(USBMS):
VENDOR_NAME = 'ANDROID'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = '__UMS_COMPOSITE'
THUMBNAIL_HEIGHT = 144
THUMBNAIL_HEIGHT = 130
EBOOK_DIR_MAIN = 'eBooks'
SUPPORTS_SUB_DIRS = False
@ -204,3 +204,63 @@ class LUMIREAD(USBMS):
with open(cfilepath+'.jpg', 'wb') as f:
f.write(metadata.thumbnail[-1])
class ALURATEK_COLOR(USBMS):
name = 'Aluratek Color Device Interface'
gui_name = 'Aluratek Color'
description = _('Communicate with the Aluratek Color')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['epub', 'fb2', 'txt', 'pdf']
VENDOR_ID = [0x1f3a]
PRODUCT_ID = [0x1000]
BCD = [0x0002]
EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'books'
VENDOR_NAME = 'USB_2.0'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'USB_FLASH_DRIVER'
class TREKSTOR(USBMS):
name = 'Trekstor E-book player device interface'
gui_name = 'Trekstor'
description = _('Communicate with the Trekstor')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['epub', 'txt', 'pdf']
VENDOR_ID = [0x1e68]
PRODUCT_ID = [0x0041]
BCD = [0x0002]
EBOOK_DIR_MAIN = 'Ebooks'
VENDOR_NAME = 'TREKSTOR'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EBOOK_PLAYER_7'
class EEEREADER(USBMS):
name = 'Asus EEE Reader device interface'
gui_name = 'EEE Reader'
description = _('Communicate with the EEE Reader')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['epub', 'fb2', 'txt', 'pdf']
VENDOR_ID = [0x0b05]
PRODUCT_ID = [0x178f]
BCD = [0x0319]
EBOOK_DIR_MAIN = 'Books'
VENDOR_NAME = 'LINUX'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'

View File

@ -58,9 +58,16 @@ class PRS505(USBMS):
SUPPORTS_USE_AUTHOR_SORT = True
EBOOK_DIR_MAIN = 'database/media/books'
ALL_BY_TITLE = _('All by title')
ALL_BY_AUTHOR = _('All by author')
EXTRA_CUSTOMIZATION_MESSAGE = _('Comma separated list of metadata fields '
'to turn into collections on the device. Possibilities include: ')+\
'series, tags, authors'
'series, tags, authors' +\
_('. Two special collections are available: %s:%s and %s:%s. Add '
'these values to the list to enable them. The collections will be '
'given the name provided after the ":" character.')%(
'abt', ALL_BY_TITLE, 'aba', ALL_BY_AUTHOR)
EXTRA_CUSTOMIZATION_DEFAULT = ', '.join(['series', 'tags'])
plugboard = None
@ -151,7 +158,7 @@ class PRS505(USBMS):
blists[i] = booklists[i]
opts = self.settings()
if opts.extra_customization:
collections = [x.lower().strip() for x in
collections = [x.strip() for x in
opts.extra_customization.split(',')]
else:
collections = []
@ -179,6 +186,8 @@ class PRS505(USBMS):
self.plugboard_func = pb_func
def upload_cover(self, path, filename, metadata, filepath):
return # Disabled as the SONY's don't need this thumbnail anyway and
# older models don't auto delete it
if metadata.thumbnail and metadata.thumbnail[-1]:
path = path.replace('/', os.sep)
is_main = path.startswith(self._main_prefix)

View File

@ -410,6 +410,9 @@ class XMLCache(object):
newmi = book.deepcopy_metadata()
newmi.template_to_attribute(book, plugboard)
newmi.set('_new_book', getattr(book, '_new_book', False))
book.set('_pb_title_sort',
newmi.get('title_sort', newmi.get('title', None)))
book.set('_pb_author_sort', newmi.get('author_sort', ''))
else:
newmi = book
(gtz_count, ltz_count, use_tz_var) = \

View File

@ -23,16 +23,16 @@ class SNE(USBMS):
FORMATS = ['epub', 'pdf', 'txt']
VENDOR_ID = [0x04e8]
PRODUCT_ID = [0x2051, 0x2053]
PRODUCT_ID = [0x2051, 0x2053, 0x2054]
BCD = [0x0323]
VENDOR_NAME = 'SAMSUNG'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'SNE-60'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['SNE-60', 'E65']
MAIN_MEMORY_VOLUME_LABEL = 'SNE Main Memory'
STORAGE_CARD_VOLUME_LABEL = 'SNE Storage Card'
EBOOK_DIR_MAIN = 'Books'
EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'Books'
SUPPORTS_SUB_DIRS = True

View File

@ -72,3 +72,13 @@ class SOVOS(TECLAST_K3):
VENDOR_NAME = 'RK28XX'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'USB-MSC'
class SUNSTECH_EB700(TECLAST_K3):
name = 'Sunstech EB700 device interface'
gui_name = 'EB700'
description = _('Communicate with the Sunstech EB700 reader.')
FORMATS = ['epub', 'fb2', 'pdf', 'pdb', 'txt']
VENDOR_NAME = 'SUNEB700'
WINDOWS_MAIN_MEM = 'USB-MSC'

View File

@ -14,6 +14,7 @@ from calibre.constants import preferred_encoding
from calibre import isbytestring, force_unicode
from calibre.utils.config import prefs, tweaks
from calibre.utils.icu import strcmp
from calibre.utils.formatter import eval_formatter
class Book(Metadata):
def __init__(self, prefix, lpath, size=None, other=None):
@ -107,23 +108,25 @@ class CollectionsBookList(BookList):
return sortattr
return None
def compute_category_name(self, attr, category, field_meta):
def compute_category_name(self, field_key, field_value, field_meta):
renames = tweaks['sony_collection_renaming_rules']
attr_name = renames.get(attr, None)
if attr_name is None:
field_name = renames.get(field_key, None)
if field_name is None:
if field_meta['is_custom']:
attr_name = '(%s)'%field_meta['name']
field_name = field_meta['name']
else:
attr_name = ''
elif attr_name != '':
attr_name = '(%s)'%attr_name
cat_name = '%s %s'%(category, attr_name)
field_name = ''
cat_name = eval_formatter.safe_format(
fmt=tweaks['sony_collection_name_template'],
kwargs={'category':field_name, 'value':field_value},
error_value='GET_CATEGORY', book=None)
return cat_name.strip()
def get_collections(self, collection_attributes):
from calibre.devices.usbms.driver import debug_print
debug_print('Starting get_collections:', prefs['manage_device_metadata'])
debug_print('Renaming rules:', tweaks['sony_collection_renaming_rules'])
debug_print('Formatting template:', tweaks['sony_collection_name_template'])
debug_print('Sorting rules:', tweaks['sony_collection_sorting_rules'])
# Complexity: we can use renaming rules only when using automatic
@ -132,9 +135,24 @@ class CollectionsBookList(BookList):
use_renaming_rules = prefs['manage_device_metadata'] == 'on_connect'
collections = {}
# This map of sets is used to avoid linear searches when testing for
# book equality
# get the special collection names
all_by_author = ''
all_by_title = ''
ca = []
for c in collection_attributes:
if c.startswith('aba:') and c[4:]:
all_by_author = c[4:].strip()
elif c.startswith('abt:') and c[4:]:
all_by_title = c[4:].strip()
else:
ca.append(c.lower())
collection_attributes = ca
for book in self:
tsval = book.get('_pb_title_sort',
book.get('title_sort', book.get('title', 'zzzz')))
asval = book.get('_pb_author_sort', book.get('author_sort', ''))
# Make sure we can identify this book via the lpath
lpath = getattr(book, 'lpath', None)
if lpath is None:
@ -211,22 +229,29 @@ class CollectionsBookList(BookList):
collections[cat_name] = {}
if use_renaming_rules and sort_attr:
sort_val = book.get(sort_attr, None)
collections[cat_name][lpath] = \
(book, sort_val, book.get('title_sort', 'zzzz'))
collections[cat_name][lpath] = (book, sort_val, tsval)
elif is_series:
if doing_dc:
collections[cat_name][lpath] = \
(book, book.get('series_index', sys.maxint),
book.get('title_sort', 'zzzz'))
(book, book.get('series_index', sys.maxint), tsval)
else:
collections[cat_name][lpath] = \
(book, book.get(attr+'_index', sys.maxint),
book.get('title_sort', 'zzzz'))
(book, book.get(attr+'_index', sys.maxint), tsval)
else:
if lpath not in collections[cat_name]:
collections[cat_name][lpath] = \
(book, book.get('title_sort', 'zzzz'),
book.get('title_sort', 'zzzz'))
collections[cat_name][lpath] = (book, tsval, tsval)
# All books by author
if all_by_author:
if all_by_author not in collections:
collections[all_by_author] = {}
collections[all_by_author][lpath] = (book, asval, tsval)
# All books by title
if all_by_title:
if all_by_title not in collections:
collections[all_by_title] = {}
collections[all_by_title][lpath] = (book, tsval, asval)
# Sort collections
result = {}
@ -240,7 +265,7 @@ class CollectionsBookList(BookList):
return 1
if y is None:
return -1
if isinstance(x, (unicode, str)):
if isinstance(x, basestring) and isinstance(y, basestring):
c = strcmp(force_unicode(x), force_unicode(y))
else:
c = cmp(x, y)

View File

@ -605,8 +605,9 @@ class Device(DeviceConfig, DevicePlugin):
main, carda, cardb = self.find_device_nodes()
if main is None:
raise DeviceError(_('Unable to detect the %s disk drive. Your '
' kernel is probably exporting a deprecated version of SYSFS.')
raise DeviceError(_('Unable to detect the %s disk drive. Either '
'the device has already been ejected, or your '
'kernel is exporting a deprecated version of SYSFS.')
%self.__class__.__name__)
self._linux_mount_map = {}

View File

@ -22,6 +22,9 @@ class UnknownFormatError(Exception):
class DRMError(ValueError):
pass
class ParserError(ValueError):
pass
BOOK_EXTENSIONS = ['lrf', 'rar', 'zip', 'rtf', 'lit', 'txt', 'htm', 'xhtm',
'html', 'xhtml', 'pdf', 'pdb', 'pdr', 'prc', 'mobi', 'azw', 'doc',
'epub', 'fb2', 'djvu', 'lrx', 'cbr', 'cbz', 'cbc', 'oebzip',
@ -39,6 +42,10 @@ class HTMLRenderer(object):
try:
if not ok:
raise RuntimeError('Rendering of HTML failed.')
de = self.page.mainFrame().documentElement()
pe = de.findFirst('parsererror')
if not pe.isNull():
raise ParserError(pe.toPlainText())
image = QImage(self.page.viewportSize(), QImage.Format_ARGB32)
image.setDotsPerMeterX(96*(100/2.54))
image.setDotsPerMeterY(96*(100/2.54))
@ -104,7 +111,7 @@ def render_html_svg_workaround(path_to_html, log, width=590, height=750):
return data
def render_html(path_to_html, width=590, height=750):
def render_html(path_to_html, width=590, height=750, as_xhtml=True):
from PyQt4.QtWebKit import QWebPage
from PyQt4.Qt import QEventLoop, QPalette, Qt, SIGNAL, QUrl, QSize
from calibre.gui2 import is_ok_to_use_qt
@ -122,11 +129,18 @@ def render_html(path_to_html, width=590, height=750):
renderer = HTMLRenderer(page, loop)
page.connect(page, SIGNAL('loadFinished(bool)'), renderer,
Qt.QueuedConnection)
page.mainFrame().load(QUrl.fromLocalFile(path_to_html))
if as_xhtml:
page.mainFrame().setContent(open(path_to_html, 'rb').read(),
'application/xhtml+xml', QUrl.fromLocalFile(path_to_html))
else:
page.mainFrame().load(QUrl.fromLocalFile(path_to_html))
loop.exec_()
renderer.loop = renderer.page = None
del page
del loop
if isinstance(renderer.exception, ParserError) and as_xhtml:
return render_html(path_to_html, width=width, height=height,
as_xhtml=False)
return renderer
def check_ebook_format(stream, current_guess):

View File

@ -120,7 +120,7 @@ def add_pipeline_options(parser, plumber):
[
'base_font_size', 'disable_font_rescaling',
'font_size_mapping',
'line_height',
'line_height', 'minimum_line_height',
'linearize_tables',
'extra_css', 'smarten_punctuation',
'margin_top', 'margin_left', 'margin_right',

View File

@ -160,13 +160,30 @@ OptionRecommendation(name='disable_font_rescaling',
)
),
OptionRecommendation(name='minimum_line_height',
recommended_value=120.0, level=OptionRecommendation.LOW,
help=_(
'The minimum line height, as a percentage of the element\'s '
'calculated font size. calibre will ensure that every element '
'has a line height of at least this setting, irrespective of '
'what the input document specifies. Set to zero to disable. '
'Default is 120%. Use this setting in preference to '
'the direct line height specification, unless you know what '
'you are doing. For example, you can achieve "double spaced" '
'text by setting this to 240.'
)
),
OptionRecommendation(name='line_height',
recommended_value=0, level=OptionRecommendation.LOW,
help=_('The line height in pts. Controls spacing between consecutive '
'lines of text. By default no line height manipulation is '
'performed.'
)
help=_(
'The line height in pts. Controls spacing between consecutive '
'lines of text. Only applies to elements that do not define '
'their own line height. In most cases, the minimum line height '
'option is more useful. '
'By default no line height manipulation is performed.'
)
),
OptionRecommendation(name='linearize_tables',

View File

@ -101,6 +101,13 @@ class EPUBOutput(OutputFormatPlugin):
)
),
OptionRecommendation(name='epub_flatten', recommended_value=False,
help=_('This option is needed only if you intend to use the EPUB'
' with FBReaderJ. It will flatten the file system inside the'
' EPUB, putting all files into the top level.')
),
])
recommendations = set([('pretty_print', True, OptionRecommendation.HIGH)])
@ -142,8 +149,12 @@ class EPUBOutput(OutputFormatPlugin):
def convert(self, oeb, output_path, input_plugin, opts, log):
self.log, self.opts, self.oeb = log, opts, oeb
#from calibre.ebooks.oeb.transforms.filenames import UniqueFilenames
#UniqueFilenames()(oeb, opts)
if self.opts.epub_flatten:
from calibre.ebooks.oeb.transforms.filenames import FlatFilenames
FlatFilenames()(oeb, opts)
else:
from calibre.ebooks.oeb.transforms.filenames import UniqueFilenames
UniqueFilenames()(oeb, opts)
self.workaround_ade_quirks()
self.workaround_webkit_quirks()

View File

@ -6,6 +6,7 @@ __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from uuid import uuid4
import time
from calibre.constants import __appname__, __version__
from calibre import strftime, prepare_string_for_xml as xml
@ -103,7 +104,7 @@ def sony_metadata(oeb):
publisher=xml(publisher), issue_date=xml(date),
language=xml(language))
updated = strftime('%Y-%m-%dT%H:%M:%SZ')
updated = strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
def cal_id(x):
for k, v in x.attrib.items():

View File

@ -27,13 +27,10 @@ class FB2MLizer(object):
'''
Todo: * Include more FB2 specific tags in the conversion.
* Handle a tags.
* Figure out some way to turn oeb_book.toc items into <section><title>
<p> to allow for readers to generate toc from the document.
'''
def __init__(self, log):
self.log = log
self.image_hrefs = {}
self.reset_state()
def reset_state(self):
@ -43,17 +40,25 @@ class FB2MLizer(object):
# in different directories. FB2 images are all in a flat layout so we rename all images
# into a sequential numbering system to ensure there are no collisions between image names.
self.image_hrefs = {}
# Mapping of toc items and their
self.toc = {}
# Used to see whether a new <section> needs to be opened
self.section_level = 0
def extract_content(self, oeb_book, opts):
self.log.info('Converting XHTML to FB2 markup...')
self.oeb_book = oeb_book
self.opts = opts
self.reset_state()
# Used for adding <section>s and <title>s to allow readers
# to generate toc from the document.
if self.opts.sectionize == 'toc':
self.create_flat_toc(self.oeb_book.toc, 1)
return self.fb2mlize_spine()
def fb2mlize_spine(self):
self.reset_state()
output = [self.fb2_header()]
output.append(self.get_text())
output.append(self.fb2mlize_images())
@ -66,13 +71,23 @@ class FB2MLizer(object):
return u'<?xml version="1.0" encoding="UTF-8"?>' + output
def clean_text(self, text):
text = re.sub(r'(?miu)<section>\s*</section>', '', text)
text = re.sub(r'(?miu)\s+</section>', '</section>', text)
text = re.sub(r'(?miu)</section><section>', '</section>\n\n<section>', text)
text = re.sub(r'(?miu)<p>\s*</p>', '', text)
text = re.sub(r'(?miu)\s+</p>', '</p>', text)
text = re.sub(r'(?miu)</p><p>', '</p>\n\n<p>', text)
text = re.sub(r'(?miu)\s*</p>', '</p>', text)
text = re.sub(r'(?miu)</p>\s*<p>', '</p>\n\n<p>', text)
text = re.sub(r'(?miu)<title>\s*</title>', '', text)
text = re.sub(r'(?miu)\s+</title>', '</title>', text)
text = re.sub(r'(?miu)<section>\s*</section>', '', text)
text = re.sub(r'(?miu)\s*</section>', '\n</section>', text)
text = re.sub(r'(?miu)</section>\s*', '</section>\n\n', text)
text = re.sub(r'(?miu)\s*<section>', '\n<section>', text)
text = re.sub(r'(?miu)<section>\s*', '<section>\n', text)
text = re.sub(r'(?miu)</section><section>', '</section>\n\n<section>', text)
if self.opts.insert_blank_line:
text = re.sub(r'(?miu)</p>', '</p><empty-line />', text)
return text
def fb2_header(self):
@ -140,12 +155,34 @@ class FB2MLizer(object):
def get_text(self):
text = ['<body>']
# Create main section if there are no others to create
if self.opts.sectionize == 'nothing':
text.append('<section>')
self.section_level += 1
for item in self.oeb_book.spine:
self.log.debug('Converting %s to FictionBook2 XML' % item.href)
stylizer = Stylizer(item.data, item.href, self.oeb_book, self.opts, self.opts.output_profile)
text.append('<section>')
# Start a <section> if we must sectionize each file or if the TOC references this page
page_section_open = False
if self.opts.sectionize == 'files' or self.toc.get(item.href) == 'page':
text.append('<section>')
page_section_open = True
self.section_level += 1
text += self.dump_text(item.data.find(XHTML('body')), stylizer, item)
if page_section_open:
text.append('</section>')
self.section_level -= 1
# Close any open sections
while self.section_level > 0:
text.append('</section>')
self.section_level -= 1
return ''.join(text) + '</body>'
def fb2mlize_images(self):
@ -180,6 +217,17 @@ class FB2MLizer(object):
'%s.' % (item.href, e))
return ''.join(images)
def create_flat_toc(self, nodes, level):
for item in nodes:
href, mid, id = item.href.partition('#')
if not id:
self.toc[href] = 'page'
else:
if not self.toc.get(href, None):
self.toc[href] = {}
self.toc[href][id] = level
self.create_flat_toc(item.nodes, level + 1)
def ensure_p(self):
if self.in_p:
return [], []
@ -250,10 +298,38 @@ class FB2MLizer(object):
# First tag in tree
tag = barename(elem_tree.tag)
# Convert TOC entries to <title>s and add <section>s
if self.opts.sectionize == 'toc':
# A section cannot be a child of any other element than another section,
# so leave the tag alone if there are parents
if not tag_stack:
# There are two reasons to start a new section here: the TOC pointed to
# this page (then we use the first non-<body> on the page as a <title>), or
# the TOC pointed to a specific element
newlevel = 0
toc_entry = self.toc.get(page.href, None)
if toc_entry == 'page':
if tag != 'body' and hasattr(elem_tree, 'text') and elem_tree.text:
newlevel = 1
self.toc[page.href] = None
elif toc_entry and elem_tree.attrib.get('id', None):
newlevel = toc_entry.get(elem_tree.attrib.get('id', None), None)
# Start a new section if necessary
if newlevel:
if not (newlevel > self.section_level):
fb2_out.append('</section>')
self.section_level -= 1
fb2_out.append('<section>')
self.section_level += 1
fb2_out.append('<title>')
tags.append('title')
if self.section_level == 0:
# If none of the prior processing made a section, make one now to be FB2 spec compliant
fb2_out.append('<section>')
self.section_level += 1
# Process the XHTML tag if it needs to be converted to an FB2 tag.
if tag == 'h1' and self.opts.h1_to_title or tag == 'h2' and self.opts.h2_to_title or tag == 'h3' and self.opts.h3_to_title:
fb2_out.append('<title>')
tags.append('title')
if tag == 'img':
if elem_tree.attrib.get('src', None):
# Only write the image tag if it is in the manifest.
@ -293,6 +369,18 @@ class FB2MLizer(object):
s_out, s_tags = self.handle_simple_tag('emphasis', tag_stack+tags)
fb2_out += s_out
tags += s_tags
elif tag in ('del', 'strike'):
s_out, s_tags = self.handle_simple_tag('strikethrough', tag_stack+tags)
fb2_out += s_out
tags += s_tags
elif tag == 'sub':
s_out, s_tags = self.handle_simple_tag('sub', tag_stack+tags)
fb2_out += s_out
tags += s_tags
elif tag == 'sup':
s_out, s_tags = self.handle_simple_tag('sup', tag_stack+tags)
fb2_out += s_out
tags += s_tags
# Processes style information.
if style['font-style'] == 'italic':
@ -303,6 +391,10 @@ class FB2MLizer(object):
s_out, s_tags = self.handle_simple_tag('strong', tag_stack+tags)
fb2_out += s_out
tags += s_tags
elif style['text-decoration'] == 'line-through':
s_out, s_tags = self.handle_simple_tag('strikethrough', tag_stack+tags)
fb2_out += s_out
tags += s_tags
# Process element text.
if hasattr(elem_tree, 'text') and elem_tree.text:

View File

@ -16,15 +16,15 @@ class FB2Output(OutputFormatPlugin):
file_type = 'fb2'
options = set([
OptionRecommendation(name='h1_to_title',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Wrap all h1 tags with fb2 title elements.')),
OptionRecommendation(name='h2_to_title',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Wrap all h2 tags with fb2 title elements.')),
OptionRecommendation(name='h3_to_title',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Wrap all h3 tags with fb2 title elements.')),
OptionRecommendation(name='sectionize',
recommended_value='files', level=OptionRecommendation.LOW,
choices=['toc', 'files', 'nothing'],
help=_('Specify the sectionization of elements. '
'A value of "nothing" turns the book into a single section. '
'A value of "files" turns each file into a separate section; use this if your device is having trouble. '
'A value of "Table of Contents" turns the entries in the Table of Contents into titles and creates sections; '
'if it fails, adjust the "Structure Detection" and/or "Table of Contents" settings '
'(turn on "Force use of auto-generated Table of Contents).')),
])
def convert(self, oeb_book, output_path, input_plugin, opts, log):

View File

@ -314,6 +314,8 @@ class HTMLInput(InputFormatPlugin):
rewrite_links, urlnormalize, urldefrag, BINARY_MIME, OEB_STYLES, \
xpath
from calibre import guess_type
from calibre.ebooks.oeb.transforms.metadata import \
meta_info_to_oeb_metadata
import cssutils
self.OEB_STYLES = OEB_STYLES
oeb = create_oebbook(log, None, opts, self,
@ -321,15 +323,7 @@ class HTMLInput(InputFormatPlugin):
self.oeb = oeb
metadata = oeb.metadata
if mi.title:
metadata.add('title', mi.title)
if mi.authors:
for a in mi.authors:
metadata.add('creator', a, attrib={'role':'aut'})
if mi.publisher:
metadata.add('publisher', mi.publisher)
if mi.isbn:
metadata.add('identifier', mi.isbn, attrib={'scheme':'ISBN'})
meta_info_to_oeb_metadata(mi, metadata, log)
if not metadata.language:
oeb.logger.warn(u'Language not specified')
metadata.add('language', get_lang().replace('_', '-'))

View File

@ -55,8 +55,12 @@ except:
_ignore_starts = u'\'"'+u''.join(unichr(x) for x in range(0x2018, 0x201e)+[0x2032, 0x2033])
def title_sort(title):
def title_sort(title, order=None):
if order is None:
order = tweaks['title_series_sorting']
title = title.strip()
if order == 'strictly_alphabetic':
return title
if title and title[0] in _ignore_starts:
title = title[1:]
match = _title_pat.search(title)

Some files were not shown because too many files have changed in this diff Show More