This commit is contained in:
GRiker 2011-04-29 13:37:27 -06:00
commit 79249a8429
84 changed files with 74062 additions and 60833 deletions

View File

@ -19,6 +19,78 @@
# new recipes:
# - title:
- version: 0.7.58
date: 2011-04-29
new features:
- title: "Support for converting and reading metadata from Plucker format PDB files"
type: major
- title: "The metadata that is displayed in the book details panel on the right is now completely configurable via Preferences->Look & Feel"
- title: "Add a column that shows the date when the metadata of a book record was last modified in calibre. To see the column, right click on the column headers in calibre and select Show column->Modified. Note that the dates may be incorrect for books added with older versions of calibre."
- title: "Add command line option to shutdown running calibre"
- title: "CHM Input: Store extracted files in the input/ sub dir for easy debugging when --debug-pipeline is specified"
- title: "Add a popup menu to the 'Create saved search button' to allow easy deleting of saved searches"
bug fixes:
- title: "Fix regression that broke converting to LIT in 0.7.57"
tickets: [769334]
- title: "Conversion pipeline: Remove encoding declarations from input HTML documents to guarantee that there is only a single encoding declaration in the output HTML."
tickets: [773337]
- title: "Correctly parenthesize searches that are used to make search restrictions"
- title: "Fix ratings in save to disk templates not being divided by 2"
- title: "TXT to EPUB: Underlined words (following quotes?) fail to become italics"
tickets: [772267]
- title: "Fix template function source code unavailable when not running calibre from source"
- title: "Fix adding html books from the top of a deep folder hierarchy very slow"
- title: "Only set language in MOBI metadata if it is not null"
- title: "Fix 'count-of' searches (e.g., tags:#>3)."
tickets: [771175]
- title: "Fix regression that broke connection to iTunes in some cases"
tickets: [771164]
- title: "Fix buggy regex that made converting PDFs with the string ****************** very slow"
tickets: [770534]
- title: "Fix Ctrl+L shortcut to lookup word not working in ebook viewer"
tickets: [769492]
- title: "Fix regression that broke searching on boolean columns"
improved recipes:
- HBR Blogs
- The Marker
- Financial Times
- Clarin
- Honolulu Star Advertiser
new recipes:
- title: Novi Standard
author: Darko Miletic
- title: Autobild.ro and Social Diva
author: Silviu Cotoara
- title: Novinky
author: Tomas Latal
- title: "De Volksrant (subscriber version)"
author: Selcal
- version: 0.7.57
date: 2011-04-22

55
recipes/autobild.recipe Normal file
View File

@ -0,0 +1,55 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# BUGFIX: the shebang must be the very first line of the file to have any
# effect; the coding declaration is still honoured on line 2 (PEP 263).
__license__ = 'GPL v3'
__copyright__ = u'2011, Silviu Cotoar\u0103'
'''
auto-bild.ro
'''

from calibre.web.feeds.news import BasicNewsRecipe


class AutoBild(BasicNewsRecipe):
    # Recipe for the Romanian automotive magazine Auto Bild (auto-bild.ro).
    title = u'Auto Bild'
    __author__ = u'Silviu Cotoar\u0103'
    description = 'Auto'
    publisher = 'Auto Bild'
    oldest_article = 50  # days
    language = 'ro'
    max_articles_per_feed = 100
    no_stylesheets = True
    use_embedded_content = False
    category = 'Ziare,Reviste,Auto'
    encoding = 'utf-8'
    cover_url = 'http://www.auto-bild.ro/images/autobild.gif'

    # Metadata handed to the conversion pipeline for the generated ebook.
    conversion_options = {
        'comments': description,
        'tags': category,
        'language': language,
        'publisher': publisher,
    }

    # Keep only the article body container.
    keep_only_tags = [
        dict(name='div', attrs={'class': 'box_2 articol clearfix'})
    ]

    # Strip decoration/navigation elements from inside the article.
    remove_tags = [
        dict(name='div', attrs={'class': ['detail']}),
        dict(name='a', attrs={'id': ['zoom_link']}),
        dict(name='div', attrs={'class': ['icons clearfix']}),
        dict(name='div', attrs={'class': ['pub_articol clearfix']}),
    ]

    remove_tags_after = [
        dict(name='div', attrs={'class': ['pub_articol clearfix']})
    ]

    feeds = [
        (u'Feeds', u'http://www.auto-bild.ro/rss/toate')
    ]

    def preprocess_html(self, soup):
        # Normalize <img> tags for Adobe Digital Editions compatibility.
        return self.adeify_images(soup)

View File

@ -0,0 +1,55 @@
__license__ = 'GPL v3'
__author__ = 'Joan Tur, based on El Pais version by Jordi Balcells & elargentino.com version by Darko Miletic'
description = 'Principal periodico de las islas Pitiusas, Ibiza y Formentera (Espanya) - v1.06 (29/04/2011)'
__docformat__ = 'restructuredtext en'
'''
diariodeibiza.es
'''

from calibre.web.feeds.news import BasicNewsRecipe


class DiarioDeIbiza(BasicNewsRecipe):
    # Recipe for "Diario de Ibiza" (diariodeibiza.es), the main newspaper of
    # the Pityusic Islands (Ibiza and Formentera), Spain.
    __author__ = 'Joan Tur, cullet'
    description = 'Principal periodico de las islas Pitiusas, Ibiza y Formentera (Espanya) - v1.06 (29/04/2011)'
    cover_url = 'http://estaticos01.diariodeibiza.es//elementosWeb/mediaweb/images/logo.jpg'
    title = u'Diario de Ibiza digital'
    publisher = u'Editorial Prensa Iberica'
    category = 'News, politics, culture, economy, general interest'
    language = 'es'
    # Site serves Latin-1 encoded pages.
    encoding = 'iso-8859-1'
    timefmt = '[%a, %d %b, %Y]'
    oldest_article = 2  # days
    max_articles_per_feed = 20
    use_embedded_content = False
    recursion = 5
    remove_javascript = True
    no_stylesheets = True

    # Keep only headline, subtitle, date and body; everything else is dropped.
    keep_only_tags = [
        dict(name='div', attrs={'class':['noticia_titular','epigrafe','subtitulo','actualizada','noticia_fecha','noticia_texto']}),
        dict(name='font', attrs={'class':['actualizada']})
    ]

    # One RSS feed per newspaper section (numeric ids assigned by the site).
    feeds = [
        (u'Portada de Ibiza', u'http://www.diariodeibiza.es/elementosInt/rss/1'),
        (u'Pitiuses i Balears', u'http://www.diariodeibiza.es/elementosInt/rss/2'),
        (u'Opini\xf3n', u'http://www.diariodeibiza.es/elementosInt/rss/3'),
        (u'Nacional', u'http://www.diariodeibiza.es/elementosInt/rss/4'),
        (u'Internacional', u'http://www.diariodeibiza.es/elementosInt/rss/5'),
        (u'Econom\xeda', u'http://www.diariodeibiza.es/elementosInt/rss/6'),
        (u'Deportes', u'http://www.diariodeibiza.es/elementosInt/rss/7'),
        (u'Sociedad', u'http://www.diariodeibiza.es/elementosInt/rss/8'),
        (u'Ciencia', u'http://www.diariodeibiza.es/elementosInt/rss/11'),
        (u'Tecnolog\xeda', u'http://www.diariodeibiza.es/elementosInt/rss/12'),
        (u'Gente', u'http://www.diariodeibiza.es/elementosInt/rss/13'),
        (u'Sucesos', u'http://www.diariodeibiza.es/elementosInt/rss/15'),
        (u'Cultura', u'http://www.diariodeibiza.es/elementosInt/rss/16Piti')
    ]

View File

@ -1,9 +1,6 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
# Needed for BLOGs
from calibre.web.feeds import Feed
class HBR(BasicNewsRecipe):
title = 'Harvard Business Review Blogs'
@ -32,6 +29,7 @@ class HBR(BasicNewsRecipe):
feeds = [('Blog','http://feeds.harvardbusiness.org/harvardbusiness')]
oldest_article = 30
max_articles_per_feed = 100
use_embedded_content = False
else:
timefmt = ' [%B %Y]'
@ -59,9 +57,9 @@ class HBR(BasicNewsRecipe):
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
br.open(self.LOGIN_URL)
br.select_form(name='signInForm')
br['signInForm:username'] = self.username
br['signInForm:password'] = self.password
br.select_form(name='signin-form')
br['signin-form:username'] = self.username
br['signin-form:password'] = self.password
raw = br.submit().read()
if 'My Account' not in raw:
raise Exception('Failed to login, are you sure your username and password are correct?')
@ -161,27 +159,13 @@ class HBR(BasicNewsRecipe):
return startDate, endDate
#-------------------------------------------------------------------------------------------------
def hbr_parse_blogs(self, feeds):
# Do the "official" parse_feeds first
rssFeeds = Feed()
# Use the PARSE_FEEDS method to get a Feeds object of the articles
rssFeeds = BasicNewsRecipe.parse_feeds(self)
# Create a new feed of the right configuration and append to existing afeeds
self.feed_to_index_append(rssFeeds[:], feeds)
#-------------------------------------------------------------------------------------------------
def parse_index(self):
if self.INCLUDE_ARTICLES == True:
soup = self.hbr_get_toc()
feeds = self.hbr_parse_toc(soup)
else:
feeds = []
# blog stuff
if self.INCLUDE_BLOGS == True:
self.hbr_parse_blogs(feeds)
return BasicNewsRecipe.parse_index(self)
return feeds
#-------------------------------------------------------------------------------------------------

BIN
recipes/icons/autobild.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 614 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

42
recipes/novinky.recipe Normal file
View File

@ -0,0 +1,42 @@
__license__ = 'GPL v3'
__copyright__ = '2011, Tomas Latal<latal.tomas at gmail.com>'

from calibre.web.feeds.news import BasicNewsRecipe


class NovinkyCZ(BasicNewsRecipe):
    # Recipe for the Czech news portal Novinky.cz.
    title = 'Novinky'
    __author__ = 'Tomas Latal'
    __version__ = '1.0'
    __date__ = '24 April 2011'
    description = 'News from server Novinky.cz'
    oldest_article = 1  # days
    max_articles_per_feed = 10
    encoding = 'utf8'
    publisher = 'Novinky'
    category = 'news, CZ'
    language = 'cs'
    publication_type = 'newsportal'
    no_stylesheets = True
    remove_javascript = True
    # Style the article description and author lines in the output.
    extra_css = 'p.acmDescription{font-style:italic;} p.acmAuthor{font-size:0.8em; color:#707070}'

    # One RSS feed per site section.
    feeds = [
        (u'Dom\xe1c\xed', u'http://www.novinky.cz/rss/domaci/'),
        (u'Zahrani\u010d\xed', u'http://www.novinky.cz/rss/zahranicni/'),
        (u'Krimi', u'http://www.novinky.cz/rss/krimi/'),
        (u'Ekonomika', u'http://www.novinky.cz/rss/ekonomika/'),
        (u'Finance', u'http://www.novinky.cz/rss/finance/'),
        (u'Kultura', u'http://www.novinky.cz/rss/kultura/'),
        (u'Koktejl', u'http://www.novinky.cz/rss/koktejl/'),
        (u'Internet a PC', u'http://www.novinky.cz/rss/internet-a-pc/'),
        (u'Auto-moto', u'http://www.novinky.cz/rss/auto/'),
    ]

    # Trim everything before the article content and after the author block.
    remove_tags_before = dict(id='articleContent')
    remove_tags_after = [dict(id='movedArticleAuthors')]
    remove_tags = [
        dict(name='div', attrs={'id':['articleColumnInfo','pictureInnerBox']}),
        dict(name='p', attrs={'id':['articleDate']})
    ]

100
recipes/novistandard.recipe Normal file
View File

@ -0,0 +1,100 @@
__license__ = 'GPL v3'
__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>'
'''
www.standard.rs
'''

import re

from calibre.web.feeds.news import BasicNewsRecipe


class NoviStandard(BasicNewsRecipe):
    # Recipe for the Serbian magazine Novi Standard (www.standard.rs).
    title = 'Novi Standard'
    __author__ = 'Darko Miletic'
    description = 'NoviStandard - energija je neunistiva!'
    publisher = 'Novi Standard'
    category = 'news, politics, Serbia'
    no_stylesheets = True
    delay = 1  # seconds between requests
    oldest_article = 15  # days
    encoding = 'utf-8'
    # NOTE: publication_type was declared twice in the original; the
    # duplicate assignment has been removed.
    publication_type = 'magazine'
    needs_subscription = 'optional'
    remove_empty_feeds = True
    INDEX = 'http://www.standard.rs/'
    use_embedded_content = False
    language = 'sr'
    masthead_url = 'http://www.standard.rs/templates/ja_opal/images/red/logo.png'
    extra_css = """
        @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
        @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
        body{font-family: Arial,"Segoe UI","Trebuchet MS",Helvetica,sans1,sans-serif}
        .dropcap{font-family: Georgia,Times,serif1,serif; display:inline}
        .dropcap:first-letter{display: inline; font-size: xx-large; font-weight: bold}
        .contentheading{color: gray; font-size: x-large}
        .article-meta, .createdby{color: red}
        img{margin-top:0.5em; margin-bottom: 0.7em; display: block}
    """

    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language,
    }

    # Replace U+0110 (D with stroke) with U+00D0 so fonts lacking the former
    # glyph still render something close.
    preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]

    def get_browser(self):
        # BUGFIX: the original called BasicNewsRecipe.get_browser() without
        # passing self, which raises a TypeError for an unbound method.
        br = BasicNewsRecipe.get_browser(self)
        br.open(self.INDEX)
        # Subscription is optional; log in only when credentials are set.
        if self.username is not None and self.password is not None:
            br.select_form(name='login')
            br['username'] = self.username
            br['passwd'] = self.password
            br.submit()
        return br

    keep_only_tags = [dict(attrs={'class': ['contentheading', 'article-meta', 'article-content']})]
    remove_tags_after = dict(attrs={'class': 'extravote-container'})
    remove_tags = [
        dict(name=['object', 'link', 'iframe', 'meta', 'base']),
        dict(attrs={'class': 'extravote-container'}),
    ]
    remove_attributes = ['border', 'background', 'height', 'width', 'align', 'valign', 'lang']

    feeds = [
        (u'Naslovna', u'http://www.standard.rs/index.php?format=feed&type=rss')
        ,(u'Politika', u'http://www.standard.rs/vesti/36-politika.html?format=feed&type=rss')
        ,(u'Cvijanovic preporucuje', u'http://www.standard.rs/-cvijanovi-vam-preporuuje.html?format=feed&type=rss')
        ,(u'Kolumne', u'http://www.standard.rs/vesti/49-kolumne.html?format=feed&type=rss')
        ,(u'Kultura', u'http://www.standard.rs/vesti/40-kultura.html?format=feed&type=rss')
        ,(u'Lifestyle', u'http://www.standard.rs/vesti/39-lifestyle.html?format=feed&type=rss')
        ,(u'Svet', u'http://www.standard.rs/vesti/41-svet.html?format=feed&type=rss')
        ,(u'Ekonomija', u'http://www.standard.rs/vesti/37-ekonomija.html?format=feed&type=rss')
        ,(u'Sport', u'http://www.standard.rs/vesti/38-sport.html?format=feed&type=rss')
    ]

    def preprocess_html(self, soup):
        # Drop all inline styles so extra_css controls presentation.
        for item in soup.findAll(style=True):
            del item['style']
        # Remove empty <div> placeholders.
        for item in soup.findAll('div'):
            if len(item.contents) == 0:
                item.extract()
        # Flatten anchors: keep the text (or a wrapped image) but drop the link.
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                # Renamed the local from 'str' — it shadowed the builtin.
                txt = item.string
                item.replaceWith(txt)
            else:
                if limg:
                    item.name = 'div'
                    item.attrs = []
                else:
                    txt = self.tag_to_string(item)
                    item.replaceWith(txt)
        # Ensure every image carries an alt attribute (EPUB validity).
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 7, 57)
numeric_version = (0, 7, 58)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -1109,6 +1109,11 @@ class StoreAmazonKindleStore(StoreBase):
description = _('Kindle books from Amazon')
actual_plugin = 'calibre.gui2.store.amazon_plugin:AmazonKindleStore'
class StoreAmazonUKKindleStore(StoreBase):
name = 'Amazon UK Kindle'
description = _('Kindle books from Amazon.uk')
actual_plugin = 'calibre.gui2.store.amazon_uk_plugin:AmazonUKKindleStore'
class StoreBaenWebScriptionStore(StoreBase):
name = 'Baen WebScription'
description = _('Ebooks for readers.')
@ -1174,10 +1179,27 @@ class StoreSmashwordsStore(StoreBase):
description = _('Your ebook. Your way.')
actual_plugin = 'calibre.gui2.store.smashwords_plugin:SmashwordsStore'
plugins += [StoreAmazonKindleStore, StoreBaenWebScriptionStore, StoreBNStore,
class StoreWaterstonesUKStore(StoreBase):
name = 'Waterstones UK'
description = _('Feel every word')
actual_plugin = 'calibre.gui2.store.waterstones_uk_plugin:WaterstonesUKStore'
class StoreFoylesUKStore(StoreBase):
name = 'Foyles UK'
description = _('Foyles of London, online')
actual_plugin = 'calibre.gui2.store.foyles_uk_plugin:FoylesUKStore'
class AmazonDEKindleStore(StoreBase):
name = 'Amazon DE Kindle'
description = _('Kindle eBooks')
actual_plugin = 'calibre.gui2.store.amazon_de_plugin:AmazonDEKindleStore'
plugins += [StoreAmazonKindleStore, AmazonDEKindleStore, StoreAmazonUKKindleStore,
StoreBaenWebScriptionStore, StoreBNStore,
StoreBeWriteStore, StoreDieselEbooksStore, StoreEbookscomStore,
StoreEHarlequinStoretore,
StoreFeedbooksStore, StoreGutenbergStore, StoreKoboStore, StoreManyBooksStore,
StoreMobileReadStore, StoreOpenLibraryStore, StoreSmashwordsStore]
StoreEHarlequinStoretore, StoreFeedbooksStore,
StoreFoylesUKStore, StoreGutenbergStore, StoreKoboStore, StoreManyBooksStore,
StoreMobileReadStore, StoreOpenLibraryStore, StoreSmashwordsStore,
StoreWaterstonesUKStore]
# }}}

View File

@ -109,10 +109,10 @@ class ANDROID(USBMS):
'SGH-T849', '_MB300', 'A70S', 'S_ANDROID', 'A101IT', 'A70H',
'IDEOS_TABLET', 'MYTOUCH_4G', 'UMS_COMPOSITE', 'SCH-I800_CARD',
'7', 'A956', 'A955', 'A43', 'ANDROID_PLATFORM', 'TEGRA_2',
'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE']
'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE', 'A7EB']
WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
'A70S', 'A101IT', '7', 'INCREDIBLE']
'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB']
OSX_MAIN_MEM = 'Android Device Main Memory'

View File

@ -81,7 +81,7 @@ class ISBNDB(Source):
author_tokens = self.get_author_tokens(authors,
only_first_author=True)
tokens += author_tokens
tokens = [quote(t) for t in tokens]
tokens = [quote(t.encode('utf-8') if isinstance(t, unicode) else t) for t in tokens]
q = '+'.join(tokens)
q = 'index1=combined&value1='+q

View File

@ -16,7 +16,7 @@ from urllib import unquote as urlunquote
from lxml import etree, html
from calibre.constants import filesystem_encoding, __version__
from calibre.translations.dynamic import translate
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.chardet import xml_to_unicode, strip_encoding_declarations
from calibre.ebooks.oeb.entitydefs import ENTITYDEFS
from calibre.ebooks.conversion.preprocess import CSSPreProcessor
from calibre import isbytestring, as_unicode, get_types_map
@ -853,6 +853,7 @@ class Manifest(object):
self.oeb.log.debug('Parsing', self.href, '...')
# Convert to Unicode and normalize line endings
data = self.oeb.decode(data)
data = strip_encoding_declarations(data)
data = self.oeb.html_preprocessor(data)
# There could be null bytes in data if it had &#0; entities in it
data = data.replace('\0', '')

View File

@ -360,7 +360,7 @@ class Reader(FormatReader):
# plugin assemble the order based on hyperlinks.
with CurrentDir(output_dir):
for uid, num in self.uid_text_secion_number.items():
self.log.debug(_('Writing record with uid: %s as %s.html' % (uid, uid)))
self.log.debug('Writing record with uid: %s as %s.html' % (uid, uid))
with open('%s.html' % uid, 'wb') as htmlf:
html = u'<html><body>'
section_header, section_data = self.sections[num]
@ -466,7 +466,7 @@ class Reader(FormatReader):
if not home_html:
home_html = self.uid_text_secion_number.items()[0][0]
except:
raise Exception(_('Could not determine home.html'))
raise Exception('Could not determine home.html')
# Generate oeb from html conversion.
oeb = html_input.convert(open('%s.html' % home_html, 'rb'), self.options, 'html', self.log, {})
self.options.debug_pipeline = odi

View File

@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from PyQt4.Qt import QUrl

from calibre.gui2 import open_url
from calibre.gui2.store.amazon_plugin import AmazonKindleStore


class AmazonDEKindleStore(AmazonKindleStore):
    '''
    For comments on the implementation, please see amazon_plugin.py
    '''

    search_url = 'http://www.amazon.de/s/url=search-alias%3Ddigital-text&field-keywords='
    details_url = 'http://amazon.de/dp/'
    drm_search_text = u'Gleichzeitige Verwendung von Geräten'
    drm_free_text = u'Keine Einschränkung'

    def open(self, parent=None, detail_item=None, external=False):
        # Build the affiliate-tagged redirect URL: a specific product page
        # when detail_item (an ASIN) is given, the Kindle storefront otherwise.
        aff_id = {'tag': 'charhale0a-21'}
        if detail_item:
            aff_id['asin'] = detail_item
            store_link = ('http://www.amazon.de/gp/redirect.html?ie=UTF8'
                          '&location=http://www.amazon.de/dp/%(asin)s&site-redirect=de'
                          '&tag=%(tag)s&linkCode=ur2&camp=1638&creative=6742') % aff_id
        else:
            store_link = ('http://www.amazon.de/gp/redirect.html?ie=UTF8&site-redirect=de'
                          '&tag=%(tag)s&linkCode=ur2&camp=1638&creative=19454'
                          '&location=http://www.amazon.de/ebooks-kindle/b?node=530886031') % aff_id
        open_url(QUrl(store_link))

View File

@ -22,6 +22,11 @@ from calibre.gui2.store.search_result import SearchResult
class AmazonKindleStore(StorePlugin):
search_url = 'http://www.amazon.com/s/url=search-alias%3Ddigital-text&field-keywords='
details_url = 'http://amazon.com/dp/'
drm_search_text = u'Simultaneous Device Usage'
drm_free_text = u'Unlimited'
def open(self, parent=None, detail_item=None, external=False):
'''
Amazon comes with a number of difficulties.
@ -117,7 +122,7 @@ class AmazonKindleStore(StorePlugin):
open_url(QUrl(store_link))
def search(self, query, max_results=10, timeout=60):
url = 'http://www.amazon.com/s/url=search-alias%3Ddigital-text&field-keywords=' + urllib2.quote(query)
url = self.search_url + urllib2.quote(query)
br = browser()
counter = max_results
@ -180,18 +185,19 @@ class AmazonKindleStore(StorePlugin):
yield s
def get_details(self, search_result, timeout):
url = 'http://amazon.com/dp/'
url = self.details_url
br = browser()
with closing(br.open(url + search_result.detail_item, timeout=timeout)) as nf:
idata = html.fromstring(nf.read())
if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "Simultaneous Device Usage")])'):
if idata.xpath('boolean(//div[@class="content"]//li[contains(., "Unlimited") and contains(b, "Simultaneous Device Usage")])'):
if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "' +
self.drm_search_text + '")])'):
if idata.xpath('boolean(//div[@class="content"]//li[contains(., "' +
self.drm_free_text + '") and contains(b, "' +
self.drm_search_text + '")])'):
search_result.drm = SearchResult.DRM_UNLOCKED
else:
search_result.drm = SearchResult.DRM_UNKNOWN
else:
search_result.drm = SearchResult.DRM_LOCKED
return True

View File

@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from PyQt4.Qt import QUrl

from calibre.gui2 import open_url
from calibre.gui2.store.amazon_plugin import AmazonKindleStore


class AmazonUKKindleStore(AmazonKindleStore):
    '''
    For comments on the implementation, please see amazon_plugin.py
    '''

    search_url = 'http://www.amazon.co.uk/s/url=search-alias%3Ddigital-text&field-keywords='
    details_url = 'http://amazon.co.uk/dp/'

    def open(self, parent=None, detail_item=None, external=False):
        # Open the affiliate-tagged link: a product page when an ASIN is
        # supplied, otherwise the Kindle eBooks storefront.
        aff_id = {'tag': 'calcharles-21'}
        if detail_item:
            aff_id['asin'] = detail_item
            store_link = 'http://www.amazon.co.uk/gp/redirect.html?ie=UTF8&location=http://www.amazon.co.uk/dp/%(asin)s&tag=%(tag)s&linkCode=ur2&camp=1634&creative=6738' % aff_id
        else:
            store_link = 'http://www.amazon.co.uk/gp/redirect.html?ie=UTF8&location=http://www.amazon.co.uk/Kindle-eBooks/b?ie=UTF8&node=341689031&ref_=sa_menu_kbo2&tag=%(tag)s&linkCode=ur2&camp=1634&creative=19450' % aff_id
        open_url(QUrl(store_link))

View File

@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import urllib2
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class FoylesUKStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://www.awin1.com/cread.php?awinmid=1414&awinaffid=120917&clickref=&p='
url_redirect = 'http://www.foyles.co.uk'
if external or self.config.get('open_external', False):
if detail_item:
url = url + url_redirect + detail_item
open_url(QUrl(url_slash_cleaner(url)))
else:
detail_url = None
if detail_item:
detail_url = url + url_redirect + detail_item
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.foyles.co.uk/Public/Shop/Search.aspx?fFacetId=1015&searchBy=1&quick=true&term=' + urllib2.quote(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//table[contains(@id, "MainContent")]/tr/td/div[contains(@class, "Item")]'):
if counter <= 0:
break
id = ''.join(data.xpath('.//a[@class="Title"]/@href')).strip()
if not id:
continue
cover_url = ''.join(data.xpath('.//a[@class="Jacket"]/img/@src'))
if cover_url:
cover_url = 'http://www.foyles.co.uk' + cover_url
#print(cover_url)
title = ''.join(data.xpath('.//a[@class="Title"]/text()'))
author = ', '.join(data.xpath('.//span[@class="Author"]/text()'))
price = ''.join(data.xpath('./ul/li[@class="Strong"]/text()'))
price = price[price.rfind(' '):]
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price
s.detail_item = id
s.drm = SearchResult.DRM_LOCKED
s.formats = 'EPUB'
yield s

View File

@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import urllib2
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class WaterstonesUKStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://clkuk.tradedoubler.com/click?p=51196&a=1951604&g=19333484'
url_details = 'http://clkuk.tradedoubler.com/click?p(51196)a(1951604)g(16460516)url({0})'
if external or self.config.get('open_external', False):
if detail_item:
url = url_details.format(detail_item)
open_url(QUrl(url))
else:
detail_url = None
if detail_item:
detail_url = url_details.format(detail_item)
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.waterstones.com/waterstonesweb/advancedSearch.do?buttonClicked=1&format=3757&bookkeywords=' + urllib2.quote(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//div[contains(@class, "results-pane")]'):
if counter <= 0:
break
id = ''.join(data.xpath('./div/div/h2/a/@href')).strip()
if not id:
continue
cover_url = ''.join(data.xpath('.//div[@class="image"]/a/img/@src'))
title = ''.join(data.xpath('./div/div/h2/a/text()'))
author = ', '.join(data.xpath('.//p[@class="byAuthor"]/a/text()'))
price = ''.join(data.xpath('.//p[@class="price"]/span[@class="priceStandard"]/text()'))
drm = data.xpath('boolean(.//td[@headers="productFormat" and contains(., "DRM")])')
pdf = data.xpath('boolean(.//td[@headers="productFormat" and contains(., "PDF")])')
epub = data.xpath('boolean(.//td[@headers="productFormat" and contains(., "EPUB")])')
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price
if drm:
s.drm = SearchResult.DRM_LOCKED
else:
s.drm = SearchResult.DRM_UNKNOWN
s.detail_item = id
formats = []
if epub:
formats.append('EPUB')
if pdf:
formats.append('PDF')
s.formats = ', '.join(formats)
yield s

View File

@ -783,7 +783,7 @@ class ResultCache(SearchQueryParser): # {{{
else:
q = query
if search_restriction:
q = u'%s (%s)' % (search_restriction, query)
q = u'(%s) and (%s)' % (search_restriction, query)
if not q:
if set_restriction_count:
self.search_restriction_book_count = len(self._map)

View File

@ -3217,7 +3217,6 @@ books_series_link feeds
if callable(callback):
if callback(''):
break
return duplicates
def add_custom_book_data(self, book_id, name, val):
@ -3226,12 +3225,19 @@ books_series_link feeds
raise ValueError('add_custom_book_data: no such book_id %d'%book_id)
# Do the json encode first, in case it throws an exception
s = json.dumps(val, default=to_json)
self.conn.execute('DELETE FROM books_plugin_data WHERE book=? AND name=?',
(book_id, name))
self.conn.execute('''INSERT INTO books_plugin_data(book, name, val)
self.conn.execute('''INSERT OR REPLACE INTO books_plugin_data(book, name, val)
VALUES(?, ?, ?)''', (book_id, name, s))
self.commit()
def add_multiple_custom_book_data(self, name, vals, delete_first=False):
if delete_first:
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.conn.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
for book_id, val in vals.iteritems()])
self.commit()
def get_custom_book_data(self, book_id, name, default=None):
try:
s = self.conn.get('''select val FROM books_plugin_data
@ -3243,11 +3249,29 @@ books_series_link feeds
pass
return default
def get_all_custom_book_data(self, name, default=None):
try:
s = self.conn.get('''select book, val FROM books_plugin_data
WHERE name=?''', (name,))
if s is None:
return default
res = {}
for r in s:
res[r[0]] = json.loads(r[1], object_hook=from_json)
return res
except:
pass
return default
def delete_custom_book_data(self, book_id, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE book=? AND name=?',
(book_id, name))
self.commit()
def delete_all_custom_book_data(self, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.commit()
def get_ids_for_custom_book_data(self, name):
s = self.conn.get('''SELECT book FROM books_plugin_data WHERE name=?''', (name,))
return [x[0] for x in s]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff