Merge from trunk

commit b3728f7248
recipes/defensenews.recipe (new file, 64 lines)
@@ -0,0 +1,64 @@
__license__   = 'GPL v3'
__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>'
'''
www.defensenews.com
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup

class DefenseNews(BasicNewsRecipe):
    title                 = 'Defense News'
    __author__            = 'Darko Miletic'
    description           = 'Find late-breaking defense news from the leading defense news weekly'
    publisher             = 'Gannett Government Media Corporation'
    category              = 'defense news, defence news, defense, defence, defence budget, defence policy'
    oldest_article        = 31
    max_articles_per_feed = 200
    no_stylesheets        = True
    encoding              = 'utf8'
    use_embedded_content  = False
    language              = 'en'
    remove_empty_feeds    = True
    publication_type      = 'newspaper'
    masthead_url          = 'http://www.defensenews.com/images/logo_defensenews2.jpg'
    extra_css             = """
                               body{font-family: Arial,Helvetica,sans-serif }
                               img{margin-bottom: 0.4em; display:block}
                               .info{font-size: small; color: gray}
                            """

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    remove_tags = [
                    dict(name=['meta','link'])
                   ,dict(attrs={'class':['toolbar','related','left','right']})
                  ]
    remove_tags_before = dict(attrs={'class':'storyWrp'})
    remove_tags_after  = dict(attrs={'class':'middle'})

    remove_attributes=['lang']

    feeds = [
              (u'Europe'              , u'http://www.defensenews.com/rss/eur/' )
             ,(u'Americas'            , u'http://www.defensenews.com/rss/ame/' )
             ,(u'Asia & Pacific rim'  , u'http://www.defensenews.com/rss/asi/' )
             ,(u'Middle east & Africa', u'http://www.defensenews.com/rss/mid/')
             ,(u'Air'                 , u'http://www.defensenews.com/rss/air/' )
             ,(u'Land'                , u'http://www.defensenews.com/rss/lan/' )
             ,(u'Naval'               , u'http://www.defensenews.com/rss/sea/' )
            ]

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup
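The preprocess_html() hook in the recipe above strips inline style attributes and gives every image an alt text so the generated book renders and validates cleanly. A minimal standalone sketch of the same cleanup, assuming the third-party bs4 package in place of calibre's bundled BeautifulSoup:

from bs4 import BeautifulSoup

def clean_article(html):
    # Drop inline styles and make sure every image has an alt attribute,
    # mirroring what DefenseNews.preprocess_html does on each article page.
    soup = BeautifulSoup(html, 'html.parser')
    for tag in soup.find_all(style=True):
        del tag['style']
    for img in soup.find_all('img'):
        if not img.get('alt'):
            img['alt'] = 'image'
    return str(soup)

print(clean_article('<p style="color:red">Hi</p><img src="x.jpg"/>'))
# -> <p>Hi</p><img alt="image" src="x.jpg"/>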
@@ -22,8 +22,6 @@ class Economist(BasicNewsRecipe):
                 ' perspective. Best downloaded on Friday mornings (GMT)')
    extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
    oldest_article = 7.0
    cover_url = 'http://media.economist.com/sites/default/files/imagecache/print-cover-thumbnail/print-covers/currentcoverus_large.jpg'
    #cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
    remove_tags = [
            dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
            dict(attrs={'class':['dblClkTrk', 'ec-article-info',
@@ -56,6 +54,14 @@ class Economist(BasicNewsRecipe):
        return br
    '''

    def get_cover_url(self):
        br = self.browser
        br.open(self.INDEX)
        issue = br.geturl().split('/')[4]
        self.log('Fetching cover for issue: %s'%issue)
        cover_url = "http://media.economist.com/sites/default/files/imagecache/print-cover-full/print-covers/%s_CNA400.jpg" %(issue.translate(None,'-'))
        return cover_url

    def parse_index(self):
        return self.economist_parse_index()
@@ -22,8 +22,6 @@ class Economist(BasicNewsRecipe):
                 ' perspective. Best downloaded on Friday mornings (GMT)')
    extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
    oldest_article = 7.0
    cover_url = 'http://media.economist.com/sites/default/files/imagecache/print-cover-thumbnail/print-covers/currentcoverus_large.jpg'
    #cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
    remove_tags = [
            dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
            dict(attrs={'class':['dblClkTrk', 'ec-article-info',
@@ -40,6 +38,14 @@ class Economist(BasicNewsRecipe):
    # downloaded with connection reset by peer (104) errors.
    delay = 1

    def get_cover_url(self):
        br = self.browser
        br.open(self.INDEX)
        issue = br.geturl().split('/')[4]
        self.log('Fetching cover for issue: %s'%issue)
        cover_url = "http://media.economist.com/sites/default/files/imagecache/print-cover-full/print-covers/%s_CNA400.jpg" %(issue.translate(None,'-'))
        return cover_url

    def parse_index(self):
        try:
recipes/merco_press.recipe (new file, 27 lines)
@@ -0,0 +1,27 @@
from calibre.web.feeds.news import BasicNewsRecipe

class MercoPress(BasicNewsRecipe):
    title = u'Merco Press'
    description = u"Read News, Stories and Insight Analysis from Latin America and Mercosur. Politics, Economy, Business and Investments in South America."
    cover_url = 'http://en.mercopress.com/web/img/en/mercopress-logo.gif'

    __author__ = 'Russell Phillips'
    language = 'en'

    oldest_article = 7
    max_articles_per_feed = 100
    auto_cleanup = True

    extra_css = 'img{padding-bottom:1ex; display:block; text-align: center;}'
    remove_tags = [dict(name='a')]

    feeds = [('Antarctica', 'http://en.mercopress.com/rss/antarctica'),
             ('Argentina', 'http://en.mercopress.com/rss/argentina'),
             ('Brazil', 'http://en.mercopress.com/rss/brazil'),
             ('Falkland Islands', 'http://en.mercopress.com/rss/falkland-islands'),
             ('International News', 'http://en.mercopress.com/rss/international'),
             ('Latin America', 'http://en.mercopress.com/rss/latin-america'),
             ('Mercosur', 'http://en.mercopress.com/rss/mercosur'),
             ('Paraguay', 'http://en.mercopress.com/rss/paraguay'),
             ('United States', 'http://en.mercopress.com/rss/united-states'),
             ('Uruguay', 'http://en.mercopress.com/rss/uruguay')]
recipes/penguin_news.recipe (new file, 17 lines)
@@ -0,0 +1,17 @@
from calibre.web.feeds.news import BasicNewsRecipe

class MercoPress(BasicNewsRecipe):
    title = u'Penguin News'
    description = u"Penguin News: the Falkland Islands' only newspaper."
    cover_url = 'http://www.penguin-news.com/templates/rt_syndicate_j15/images/logo/light/logo1.png'
    language = 'en'

    __author__ = 'Russell Phillips'

    oldest_article = 7
    max_articles_per_feed = 100
    auto_cleanup = True

    extra_css = 'img{padding-bottom:1ex; display:block; text-align: center;}'

    feeds = [(u'Penguin News - Falkland Islands', u'http://www.penguin-news.com/index.php?format=feed&type=rss')]
recipes/wow.recipe (new file, 17 lines)
@@ -0,0 +1,17 @@
from calibre.web.feeds.news import BasicNewsRecipe

class WoW(BasicNewsRecipe):
    title = u'WoW Insider'
    language = 'en'
    __author__ = 'Krittika Goyal'
    oldest_article = 1 #days
    max_articles_per_feed = 25
    use_embedded_content = False

    no_stylesheets = True
    auto_cleanup = True

    feeds = [
        ('WoW',
         'http://wow.joystiq.com/rss.xml')
    ]
@@ -225,7 +225,10 @@ except:
    try:
        HOST=get_ip_address('wlan0')
    except:
        HOST='192.168.1.2'
    try:
        HOST=get_ip_address('ppp0')
    except:
        HOST='192.168.1.2'

PROJECT=os.path.basename(os.path.abspath('.'))
@@ -20,17 +20,23 @@ for x in [
    EXCLUDES.extend(['--exclude', x])
SAFE_EXCLUDES = ['"%s"'%x if '*' in x else x for x in EXCLUDES]

def get_rsync_pw():
    return open('/home/kovid/work/kde/conf/buildbot').read().partition(
            ':')[-1].strip()

class Rsync(Command):

    description = 'Sync source tree from development machine'

    SYNC_CMD = ' '.join(BASE_RSYNC+SAFE_EXCLUDES+
        ['rsync://{host}/work/{project}', '..'])
        ['rsync://buildbot@{host}/work/{project}', '..'])

    def run(self, opts):
        cmd = self.SYNC_CMD.format(host=HOST, project=PROJECT)
        env = dict(os.environ)
        env['RSYNC_PASSWORD'] = get_rsync_pw()
        self.info(cmd)
        subprocess.check_call(cmd, shell=True)
        subprocess.check_call(cmd, shell=True, env=env)


class Push(Command):
@@ -81,7 +87,8 @@ class VMInstaller(Command):

    def get_build_script(self):
        ans = '\n'.join(self.BUILD_PREFIX)+'\n\n'
        rs = ['export RSYNC_PASSWORD=%s'%get_rsync_pw()]
        ans = '\n'.join(self.BUILD_PREFIX + rs)+'\n\n'
        ans += ' && \\\n'.join(self.BUILD_RSYNC) + ' && \\\n'
        ans += ' && \\\n'.join(self.BUILD_CLEAN) + ' && \\\n'
        ans += ' && \\\n'.join(self.BUILD_BUILD) + ' && \\\n'
@@ -555,7 +555,8 @@ from calibre.devices.irexdr.driver import IREXDR1000, IREXDR800
from calibre.devices.jetbook.driver import JETBOOK, MIBUK, JETBOOK_MINI
from calibre.devices.kindle.driver import KINDLE, KINDLE2, KINDLE_DX
from calibre.devices.nook.driver import NOOK, NOOK_COLOR
from calibre.devices.prs505.driver import PRS505, PRST1
from calibre.devices.prs505.driver import PRS505
from calibre.devices.prst1.driver import PRST1
from calibre.devices.user_defined.driver import USER_DEFINED
from calibre.devices.android.driver import ANDROID, S60, WEBOS
from calibre.devices.nokia.driver import N770, N810, E71X, E52
@@ -1143,6 +1144,16 @@ class StoreAmazonDEKindleStore(StoreBase):
    formats = ['KINDLE']
    affiliate = True

class StoreAmazonFRKindleStore(StoreBase):
    name = 'Amazon FR Kindle'
    author = 'Charles Haley'
    description = u'Tous les ebooks Kindle'
    actual_plugin = 'calibre.gui2.store.stores.amazon_fr_plugin:AmazonFRKindleStore'

    headquarters = 'DE'
    formats = ['KINDLE']
    affiliate = True

class StoreAmazonUKKindleStore(StoreBase):
    name = 'Amazon UK Kindle'
    author = 'Charles Haley'
@@ -1520,6 +1531,7 @@ plugins += [
    StoreArchiveOrgStore,
    StoreAmazonKindleStore,
    StoreAmazonDEKindleStore,
    StoreAmazonFRKindleStore,
    StoreAmazonUKKindleStore,
    StoreBaenWebScriptionStore,
    StoreBNStore,
@@ -217,7 +217,7 @@ class DevicePlugin(Plugin):
        '''
        Unix version of :meth:`can_handle_windows`

        :param device_info: Is a tupe of (vid, pid, bcd, manufacturer, product,
        :param device_info: Is a tuple of (vid, pid, bcd, manufacturer, product,
                            serial number)

        '''
@@ -518,3 +518,9 @@ class BookList(list):
        '''
        raise NotImplementedError()

    def prepare_addable_books(self, paths):
        '''
        Given a list of paths, returns another list of paths. These paths
        point to addable versions of the books.
        '''
        return paths
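prepare_addable_books() gives a device driver the chance to hand the GUI usable copies of files that cannot be read in place; the default simply returns the paths unchanged. A hypothetical sketch of an override, with the class name and copy strategy purely illustrative:

import os, shutil, tempfile

class ExampleDriver(object):
    def prepare_addable_books(self, paths):
        # Copy each book to a temporary directory and return the copies,
        # e.g. for devices whose files cannot be opened directly.
        tdir = tempfile.mkdtemp(prefix='addable-')
        out = []
        for path in paths:
            dest = os.path.join(tdir, os.path.basename(path))
            shutil.copyfile(path, dest)
            out.append(dest)
        return out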
@@ -299,40 +299,3 @@ class PRS505(USBMS):
            f.write(metadata.thumbnail[-1])
        debug_print('Cover uploaded to: %r'%cpath)

class PRST1(USBMS):
    name           = 'SONY PRST1 and newer Device Interface'
    gui_name       = 'SONY Reader'
    description    = _('Communicate with Sony PRST1 and newer eBook readers')
    author         = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    FORMATS      = ['epub', 'lrf', 'lrx', 'rtf', 'pdf', 'txt']
    VENDOR_ID    = [0x054c]   #: SONY Vendor Id
    PRODUCT_ID   = [0x05c2]
    BCD          = [0x226]

    VENDOR_NAME        = 'SONY'
    WINDOWS_MAIN_MEM   = re.compile(
            r'(PRS-T1&)'
            )

    THUMBNAIL_HEIGHT = 217
    SCAN_FROM_ROOT   = True
    EBOOK_DIR_MAIN   = __appname__
    SUPPORTS_SUB_DIRS = True

    def windows_filter_pnp_id(self, pnp_id):
        return '_LAUNCHER' in pnp_id or '_SETTING' in pnp_id

    def get_carda_ebook_dir(self, for_upload=False):
        if for_upload:
            return __appname__
        return self.EBOOK_DIR_CARD_A

    def get_main_ebook_dir(self, for_upload=False):
        if for_upload:
            return __appname__
        return ''
src/calibre/devices/prst1/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__   = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
src/calibre/devices/prst1/driver.py (new file, 460 lines)
@@ -0,0 +1,460 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

__license__   = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

'''
Device driver for the SONY T1 devices
'''

import os, time, calendar, re
import sqlite3 as sqlite
from contextlib import closing

from calibre.devices.usbms.driver import USBMS, debug_print
from calibre.devices.usbms.device import USBDevice
from calibre.devices.usbms.books import CollectionsBookList
from calibre.devices.usbms.books import BookList
from calibre.constants import islinux

DBPATH = 'Sony_Reader/database/books.db'
THUMBPATH = 'Sony_Reader/database/cache/books/%s/thumbnail/main_thumbnail.jpg'

class ImageWrapper(object):
    def __init__(self, image_path):
        self.image_path = image_path

class PRST1(USBMS):
    name           = 'SONY PRST1 and newer Device Interface'
    gui_name       = 'SONY Reader'
    description    = _('Communicate with the PRST1 and newer SONY eBook readers')
    author         = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    path_sep = '/'
    booklist_class = CollectionsBookList

    FORMATS      = ['epub', 'pdf', 'txt']
    CAN_SET_METADATA = ['collections']
    CAN_DO_DEVICE_DB_PLUGBOARD = True

    VENDOR_ID    = [0x054c]   #: SONY Vendor Id
    PRODUCT_ID   = [0x05c2]
    BCD          = [0x226]

    VENDOR_NAME        = 'SONY'
    WINDOWS_MAIN_MEM   = re.compile(
            r'(PRS-T1&)'
            )
    MAIN_MEMORY_VOLUME_LABEL = 'SONY Reader Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'SONY Reader Storage Card'

    THUMBNAIL_HEIGHT = 144
    SUPPORTS_SUB_DIRS = True
    MUST_READ_METADATA = True
    EBOOK_DIR_MAIN = 'Sony_Reader/media/books'

    EXTRA_CUSTOMIZATION_MESSAGE = [
        _('Comma separated list of metadata fields '
            'to turn into collections on the device. Possibilities include: ')+\
                    'series, tags, authors',
        _('Upload separate cover thumbnails for books') +
            ':::'+_('Normally, the SONY readers get the cover image from the'
                ' ebook file itself. With this option, calibre will send a '
                'separate cover image to the reader, useful if you are '
                'sending DRMed books in which you cannot change the cover.'),
        _('Refresh separate covers when using automatic management') +
            ':::' +
            _('Set this option to have separate book covers uploaded '
                'every time you connect your device. Unset this option if '
                'you have so many books on the reader that performance is '
                'unacceptable.'),
        _('Preserve cover aspect ratio when building thumbnails') +
            ':::' +
            _('Set this option if you want the cover thumbnails to have '
                'the same aspect ratio (width to height) as the cover. '
                'Unset it if you want the thumbnail to be the maximum size, '
                'ignoring aspect ratio.'),
    ]
    EXTRA_CUSTOMIZATION_DEFAULT = [
                ', '.join(['series', 'tags']),
                True,
                False,
                True,
    ]

    OPT_COLLECTIONS    = 0
    OPT_UPLOAD_COVERS  = 1
    OPT_REFRESH_COVERS = 2
    OPT_PRESERVE_ASPECT_RATIO = 3

    plugboards = None
    plugboard_func = None

    def post_open_callback(self):
        # Set the thumbnail width to the theoretical max if the user has asked
        # that we do not preserve aspect ratio
        if not self.settings().extra_customization[self.OPT_PRESERVE_ASPECT_RATIO]:
            self.THUMBNAIL_WIDTH = 108

    def windows_filter_pnp_id(self, pnp_id):
        return '_LAUNCHER' in pnp_id or '_SETTING' in pnp_id

    def get_carda_ebook_dir(self, for_upload=False):
        if for_upload:
            return self.EBOOK_DIR_MAIN
        return self.EBOOK_DIR_CARD_A

    def get_main_ebook_dir(self, for_upload=False):
        if for_upload:
            return self.EBOOK_DIR_MAIN
        return ''

    def can_handle(self, devinfo, debug=False):
        if islinux:
            dev = USBDevice(devinfo)
            main, carda, cardb = self.find_device_nodes(detected_device=dev)
            if main is None and carda is None and cardb is None:
                if debug:
                    print ('\tPRS-T1: Appears to be in non data mode'
                            ' or was ejected, ignoring')
                return False
        return True

    def books(self, oncard=None, end_session=True):
        dummy_bl = BookList(None, None, None)

        if (
                (oncard == 'carda' and not self._card_a_prefix) or
                (oncard and oncard != 'carda')
            ):
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl

        prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix

        # Let parent driver get the books
        self.booklist_class.rebuild_collections = self.rebuild_collections
        bl = USBMS.books(self, oncard=oncard, end_session=end_session)

        dbpath = self.normalize_path(prefix + DBPATH)
        debug_print("SQLite DB Path: " + dbpath)

        with closing(sqlite.connect(dbpath)) as connection:
            # Replace undecodable characters in the db instead of erroring out
            connection.text_factory = lambda x: unicode(x, "utf-8", "replace")

            cursor = connection.cursor()
            # Query collections
            query = '''
                SELECT books._id, collection.title
                    FROM collections
                    LEFT OUTER JOIN books
                    LEFT OUTER JOIN collection
                    WHERE collections.content_id = books._id AND
                          collections.collection_id = collection._id
                '''
            cursor.execute(query)

            bl_collections = {}
            for i, row in enumerate(cursor):
                bl_collections.setdefault(row[0], [])
                bl_collections[row[0]].append(row[1])

            for idx, book in enumerate(bl):
                query = 'SELECT _id, thumbnail FROM books WHERE file_path = ?'
                t = (book.lpath,)
                cursor.execute(query, t)

                for i, row in enumerate(cursor):
                    book.device_collections = bl_collections.get(row[0], None)
                    thumbnail = row[1]
                    if thumbnail is not None:
                        thumbnail = self.normalize_path(prefix + thumbnail)
                        book.thumbnail = ImageWrapper(thumbnail)

            cursor.close()

        return bl

    def set_plugboards(self, plugboards, pb_func):
        self.plugboards = plugboards
        self.plugboard_func = pb_func

    def sync_booklists(self, booklists, end_session=True):
        debug_print('PRST1: starting sync_booklists')

        opts = self.settings()
        if opts.extra_customization:
            collections = [x.strip() for x in
                    opts.extra_customization[self.OPT_COLLECTIONS].split(',')]
        else:
            collections = []
        debug_print('PRST1: collection fields:', collections)

        if booklists[0] is not None:
            self.update_device_database(booklists[0], collections, None)
        if booklists[1] is not None:
            self.update_device_database(booklists[1], collections, 'carda')

        USBMS.sync_booklists(self, booklists, end_session=end_session)
        debug_print('PRST1: finished sync_booklists')

    def update_device_database(self, booklist, collections_attributes, oncard):
        debug_print('PRST1: starting update_device_database')

        plugboard = None
        if self.plugboard_func:
            plugboard = self.plugboard_func(self.__class__.__name__,
                    'device_db', self.plugboards)
            debug_print("PRST1: Using Plugboard", plugboard)

        prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
        if prefix is None:
            # Reader has no sd card inserted
            return
        source_id = 1 if oncard == 'carda' else 0

        dbpath = self.normalize_path(prefix + DBPATH)
        debug_print("SQLite DB Path: " + dbpath)

        collections = booklist.get_collections(collections_attributes)

        with closing(sqlite.connect(dbpath)) as connection:
            self.update_device_books(connection, booklist, source_id, plugboard)
            self.update_device_collections(connection, booklist, collections, source_id)

        debug_print('PRST1: finished update_device_database')

    def update_device_books(self, connection, booklist, source_id, plugboard):
        opts = self.settings()
        upload_covers = opts.extra_customization[self.OPT_UPLOAD_COVERS]
        refresh_covers = opts.extra_customization[self.OPT_REFRESH_COVERS]

        cursor = connection.cursor()

        # Get existing books
        query = 'SELECT file_path, _id FROM books'
        cursor.execute(query)

        db_books = {}
        for i, row in enumerate(cursor):
            lpath = row[0].replace('\\', '/')
            db_books[lpath] = row[1]

        for book in booklist:
            # Run through plugboard if needed
            if plugboard is not None:
                newmi = book.deepcopy_metadata()
                newmi.template_to_attribute(book, plugboard)
            else:
                newmi = book

            # Get Metadata We Want
            lpath = book.lpath
            author = newmi.authors[0]
            title = newmi.title

            if lpath not in db_books:
                query = '''
                    INSERT INTO books
                    (title, author, source_id, added_date, modified_date,
                    file_path, file_name, file_size, mime_type, corrupted,
                    prevent_delete)
                    values (?,?,?,?,?,?,?,?,?,0,0)
                    '''
                t = (title, author, source_id, int(time.time() * 1000),
                        int(calendar.timegm(book.datetime) * 1000), lpath,
                        os.path.basename(book.lpath), book.size, book.mime)
                cursor.execute(query, t)
                book.bookId = cursor.lastrowid
                if upload_covers:
                    self.upload_book_cover(connection, book, source_id)
                debug_print('Inserted New Book: ' + book.title)
            else:
                query = '''
                    UPDATE books
                    SET title = ?, author = ?, modified_date = ?, file_size = ?
                    WHERE file_path = ?
                    '''
                t = (title, author, int(calendar.timegm(book.datetime) * 1000), book.size,
                        lpath)
                cursor.execute(query, t)
                book.bookId = db_books[lpath]
                if refresh_covers:
                    self.upload_book_cover(connection, book, source_id)
                db_books[lpath] = None

        for book, bookId in db_books.items():
            if bookId is not None:
                # Remove From Collections
                query = 'DELETE FROM collections WHERE content_id = ?'
                t = (bookId,)
                cursor.execute(query, t)
                # Remove from Books
                query = 'DELETE FROM books where _id = ?'
                t = (bookId,)
                cursor.execute(query, t)
                debug_print('Deleted Book:' + book)

        connection.commit()
        cursor.close()

    def update_device_collections(self, connection, booklist, collections,
            source_id):
        cursor = connection.cursor()

        if collections:
            # Get existing collections
            query = 'SELECT _id, title FROM collection'
            cursor.execute(query)

            db_collections = {}
            for i, row in enumerate(cursor):
                db_collections[row[1]] = row[0]

            for collection, books in collections.items():
                if collection not in db_collections:
                    query = 'INSERT INTO collection (title, source_id) VALUES (?,?)'
                    t = (collection, source_id)
                    cursor.execute(query, t)
                    db_collections[collection] = cursor.lastrowid
                    debug_print('Inserted New Collection: ' + collection)

                # Get existing books in collection
                query = '''
                    SELECT books.file_path, content_id
                    FROM collections
                    LEFT OUTER JOIN books
                    WHERE collection_id = ? AND books._id = collections.content_id
                    '''
                t = (db_collections[collection],)
                cursor.execute(query, t)

                db_books = {}
                for i, row in enumerate(cursor):
                    db_books[row[0]] = row[1]

                for idx, book in enumerate(books):
                    if collection not in book.device_collections:
                        book.device_collections.append(collection)
                    if db_books.get(book.lpath, None) is None:
                        query = '''
                            INSERT INTO collections (collection_id, content_id,
                            added_order) values (?,?,?)
                            '''
                        t = (db_collections[collection], book.bookId, idx)
                        cursor.execute(query, t)
                        debug_print('Inserted Book Into Collection: ' +
                                book.title + ' -> ' + collection)
                    else:
                        query = '''
                            UPDATE collections
                            SET added_order = ?
                            WHERE content_id = ? AND collection_id = ?
                            '''
                        t = (idx, book.bookId, db_collections[collection])
                        cursor.execute(query, t)

                    db_books[book.lpath] = None

                for bookPath, bookId in db_books.items():
                    if bookId is not None:
                        query = ('DELETE FROM collections '
                                'WHERE content_id = ? AND collection_id = ? ')
                        t = (bookId, db_collections[collection],)
                        cursor.execute(query, t)
                        debug_print('Deleted Book From Collection: ' + bookPath
                                + ' -> ' + collection)

                db_collections[collection] = None

            for collection, collectionId in db_collections.items():
                if collectionId is not None:
                    # Remove Books from Collection
                    query = ('DELETE FROM collections '
                            'WHERE collection_id = ?')
                    t = (collectionId,)
                    cursor.execute(query, t)
                    # Remove Collection
                    query = ('DELETE FROM collection '
                            'WHERE _id = ?')
                    t = (collectionId,)
                    cursor.execute(query, t)
                    debug_print('Deleted Collection: ' + collection)

        connection.commit()
        cursor.close()

    def rebuild_collections(self, booklist, oncard):
        debug_print('PRST1: starting rebuild_collections')

        opts = self.settings()
        if opts.extra_customization:
            collections = [x.strip() for x in
                    opts.extra_customization[self.OPT_COLLECTIONS].split(',')]
        else:
            collections = []
        debug_print('PRST1: collection fields:', collections)

        self.update_device_database(booklist, collections, oncard)

        debug_print('PRS-T1: finished rebuild_collections')

    def upload_cover(self, path, filename, metadata, filepath):
        debug_print('PRS-T1: uploading cover')

        if filepath.startswith(self._main_prefix):
            prefix = self._main_prefix
            source_id = 0
        else:
            prefix = self._card_a_prefix
            source_id = 1

        metadata.lpath = filepath.partition(prefix)[2]
        dbpath = self.normalize_path(prefix + DBPATH)
        debug_print("SQLite DB Path: " + dbpath)

        with closing(sqlite.connect(dbpath)) as connection:
            cursor = connection.cursor()

            query = 'SELECT _id FROM books WHERE file_path = ?'
            t = (metadata.lpath,)
            cursor.execute(query, t)

            for i, row in enumerate(cursor):
                metadata.bookId = row[0]

            cursor.close()

            if metadata.bookId is not None:
                debug_print('PRS-T1: refreshing cover for book being sent')
                self.upload_book_cover(connection, metadata, source_id)

        debug_print('PRS-T1: done uploading cover')

    def upload_book_cover(self, connection, book, source_id):
        debug_print('PRST1: Uploading/Refreshing Cover for ' + book.title)
        if not (book.thumbnail and book.thumbnail[-1]):
            # Nothing to write if the book carries no thumbnail data
            return
        cursor = connection.cursor()

        thumbnail_path = THUMBPATH%book.bookId

        prefix = self._main_prefix if source_id == 0 else self._card_a_prefix
        thumbnail_file_path = os.path.join(prefix, *thumbnail_path.split('/'))
        thumbnail_dir_path = os.path.dirname(thumbnail_file_path)
        if not os.path.exists(thumbnail_dir_path):
            os.makedirs(thumbnail_dir_path)

        with open(thumbnail_file_path, 'wb') as f:
            f.write(book.thumbnail[-1])

        query = 'UPDATE books SET thumbnail = ? WHERE _id = ?'
        t = (thumbnail_path, book.bookId,)
        cursor.execute(query, t)

        connection.commit()
        cursor.close()
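The driver above manages three tables in the Reader's books.db: books, collection, and collections (the link table tying the two together). A reduced sketch of that layout and of the join used in books(), with the column set trimmed to what the queries above actually touch (the real database on a PRS-T1 has more columns):

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.executescript('''
    CREATE TABLE books (_id INTEGER PRIMARY KEY, title TEXT, author TEXT,
                        file_path TEXT, thumbnail TEXT);
    CREATE TABLE collection (_id INTEGER PRIMARY KEY, title TEXT, source_id INTEGER);
    CREATE TABLE collections (collection_id INTEGER, content_id INTEGER, added_order INTEGER);
''')
cur.execute("INSERT INTO books (title, author, file_path) VALUES ('A Title', 'An Author', 'books/a.epub')")
book_id = cur.lastrowid
cur.execute("INSERT INTO collection (title, source_id) VALUES ('Series X', 0)")
cur.execute("INSERT INTO collections VALUES (?, ?, 0)", (cur.lastrowid, book_id))
# The same shape of query PRST1.books() uses to map book ids to collection names
for row in cur.execute('''SELECT books._id, collection.title
                          FROM collections
                          LEFT OUTER JOIN books
                          LEFT OUTER JOIN collection
                          WHERE collections.content_id = books._id AND
                                collections.collection_id = collection._id'''):
    print(row)  # -> (1, 'Series X')
conn.close()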
@@ -483,7 +483,7 @@ class Device(DeviceConfig, DevicePlugin):
        self._card_a_prefix = get_card_prefix('carda')
        self._card_b_prefix = get_card_prefix('cardb')

    def find_device_nodes(self):
    def find_device_nodes(self, detected_device=None):

        def walk(base):
            base = os.path.abspath(os.path.realpath(base))
@@ -507,8 +507,11 @@ class Device(DeviceConfig, DevicePlugin):
        d, j = os.path.dirname, os.path.join
        usb_dir = None

        if detected_device is None:
            detected_device = self.detected_device

        def test(val, attr):
            q = getattr(self.detected_device, attr)
            q = getattr(detected_device, attr)
            return q == val

        for x, isfile in walk('/sys/devices'):
@@ -596,6 +599,8 @@ class Device(DeviceConfig, DevicePlugin):
            label = self.STORAGE_CARD2_VOLUME_LABEL
            if not label:
                label = self.STORAGE_CARD_VOLUME_LABEL + ' 2'
        if not label:
            label = 'E-book Reader (%s)'%type
        extra = 0
        while True:
            q = ' (%d)'%extra if extra else ''
@@ -397,6 +397,7 @@ class AddAction(InterfaceAction):
            d = error_dialog(self.gui, _('Add to library'), _('No book files found'))
            d.exec_()
            return
        paths = self.gui.device_manager.device.prepare_addable_books(paths)
        from calibre.gui2.add import Adder
        self.__adder_func = partial(self._add_from_device_adder, on_card=None,
                model=view.model())
@@ -206,7 +206,7 @@
       <item>
        <widget class="QCheckBox" name="opt_autolaunch_server">
         <property name="text">
          <string>Run server &amp;automatically on startup</string>
          <string>Run server &amp;automatically when calibre starts</string>
         </property>
        </widget>
       </item>
@@ -6,7 +6,6 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import urllib
from contextlib import closing

from lxml import html
@@ -37,27 +36,16 @@ class AmazonDEKindleStore(StorePlugin):

    def search(self, query, max_results=10, timeout=60):
        search_url = 'http://www.amazon.de/s/?url=search-alias%3Ddigital-text&field-keywords='
        url = search_url + urllib.quote_plus(query)
        url = search_url + query.encode('ascii', 'backslashreplace').replace('%', '%25').replace('\\x', '%').replace(' ', '+')
        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            doc = html.fromstring(f.read().decode('latin-1', 'replace'))

            # Amazon has two results pages.
            # 20110725: seems that is_shot is gone.
            # is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
            # # Horizontal grid of books.
            # if is_shot:
            #     data_xpath = '//div[contains(@class, "result")]'
            #     format_xpath = './/div[@class="productTitle"]/text()'
            #     cover_xpath = './/div[@class="productTitle"]//img/@src'
            # # Vertical list of books.
            # else:
            data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
            format_xpath = './/span[@class="format"]/text()'
            cover_xpath = './/img[@class="productImage"]/@src'
            # end is_shot else

            for data in doc.xpath(data_xpath):
                if counter <= 0:
@@ -80,11 +68,9 @@ class AmazonDEKindleStore(StorePlugin):
                title = ''.join(data.xpath('.//div[@class="title"]/a/text()'))
                price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))

                # if is_shot:
                #     author = format.split(' von ')[-1]
                # else:
                author = ''.join(data.xpath('.//div[@class="title"]/span[@class="ptBrand"]/text()'))
                author = author.split('von ')[-1]
                if author.startswith('von '):
                    author = author[4:]

                counter -= 1
src/calibre/gui2/store/stores/amazon_fr_plugin.py (new file, 82 lines)
@@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.search_result import SearchResult

class AmazonFRKindleStore(StorePlugin):
    '''
    For comments on the implementation, please see amazon_plugin.py
    '''

    def open(self, parent=None, detail_item=None, external=False):
        aff_id = {'tag': 'charhale-21'}
        store_link = 'http://www.amazon.fr/livres-kindle/b?ie=UTF8&node=695398031&ref_=sa_menu_kbo1&_encoding=UTF8&tag=%(tag)s&linkCode=ur2&camp=1642&creative=19458' % aff_id

        if detail_item:
            aff_id['asin'] = detail_item
            store_link = 'http://www.amazon.fr/gp/redirect.html?ie=UTF8&location=http://www.amazon.fr/dp/%(asin)s&tag=%(tag)s&linkCode=ur2&camp=1634&creative=6738' % aff_id
        open_url(QUrl(store_link))

    def search(self, query, max_results=10, timeout=60):
        search_url = 'http://www.amazon.fr/s/?url=search-alias%3Ddigital-text&field-keywords='
        url = search_url + query.encode('ascii', 'backslashreplace').replace('%', '%25').replace('\\x', '%').replace(' ', '+')
        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read().decode('latin-1', 'replace'))

            data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
            format_xpath = './/span[@class="format"]/text()'
            cover_xpath = './/img[@class="productImage"]/@src'

            for data in doc.xpath(data_xpath):
                if counter <= 0:
                    break

                # Even though we are searching digital-text only Amazon will still
                # put in results for non Kindle books (author pages). So we need
                # to explicitly check if the item is a Kindle book and ignore it
                # if it isn't.
                format = ''.join(data.xpath(format_xpath))
                if 'kindle' not in format.lower():
                    continue

                # We must have an asin otherwise we can't easily reference the
                # book later.
                asin = ''.join(data.xpath("@name"))

                cover_url = ''.join(data.xpath(cover_xpath))

                title = ''.join(data.xpath('.//div[@class="title"]/a/text()'))
                price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))
                author = unicode(''.join(data.xpath('.//div[@class="title"]/span[@class="ptBrand"]/text()')))
                if author.startswith('de '):
                    author = author[3:]

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url.strip()
                s.title = title.strip()
                s.author = author.strip()
                s.price = price.strip()
                s.detail_item = asin.strip()
                s.formats = 'Kindle'
                s.drm = SearchResult.DRM_UNKNOWN

                yield s
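The search() methods in these store plugins drop urllib.quote_plus in favour of an ASCII-only encoding so that accented queries survive Amazon's latin-1 result pages. A small sketch of that transformation, written here in Python 3 syntax (the plugins themselves are Python 2 era) with the function name purely illustrative:

def encode_amazon_query(query):
    # Escape non-ASCII characters as \xNN, protect any literal '%', then
    # turn the \xNN escapes into %NN and spaces into '+' for the URL.
    encoded = query.encode('ascii', 'backslashreplace').decode('ascii')
    return encoded.replace('%', '%25').replace('\\x', '%').replace(' ', '+')

print(encode_amazon_query('café crème'))  # -> caf%e9+cr%e8me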
@@ -8,7 +8,6 @@ __docformat__ = 'restructuredtext en'

import random
import re
import urllib
from contextlib import closing

from lxml import html
@@ -122,12 +121,12 @@ class AmazonKindleStore(StorePlugin):
        open_url(QUrl(store_link))

    def search(self, query, max_results=10, timeout=60):
        url = self.search_url + urllib.quote_plus(query)
        url = self.search_url + query.encode('ascii', 'backslashreplace').replace('%', '%25').replace('\\x', '%').replace(' ', '+')
        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            doc = html.fromstring(f.read().decode('latin-1', 'replace'))

            # Amazon has two results pages.
            is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
@@ -6,7 +6,6 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import urllib
from contextlib import closing

from lxml import html
@@ -34,27 +33,16 @@ class AmazonUKKindleStore(StorePlugin):

    def search(self, query, max_results=10, timeout=60):
        search_url = 'http://www.amazon.co.uk/s/?url=search-alias%3Ddigital-text&field-keywords='
        url = search_url + urllib.quote_plus(query)
        url = search_url + query.encode('ascii', 'backslashreplace').replace('%', '%25').replace('\\x', '%').replace(' ', '+')
        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            doc = html.fromstring(f.read().decode('latin-1', 'replace'))

            # Amazon has two results pages.
            # 20110725: seems that is_shot is gone.
            # is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
            # # Horizontal grid of books.
            # if is_shot:
            #     data_xpath = '//div[contains(@class, "result")]'
            #     format_xpath = './/div[@class="productTitle"]/text()'
            #     cover_xpath = './/div[@class="productTitle"]//img/@src'
            # # Vertical list of books.
            # else:
            data_xpath = '//div[contains(@class, "result") and contains(@class, "product")]'
            format_xpath = './/span[@class="format"]/text()'
            cover_xpath = './/img[@class="productImage"]/@src'
            # end is_shot else

            for data in doc.xpath(data_xpath):
                if counter <= 0:
@@ -77,11 +65,9 @@ class AmazonUKKindleStore(StorePlugin):
                title = ''.join(data.xpath('.//div[@class="title"]/a/text()'))
                price = ''.join(data.xpath('.//div[@class="newPrice"]/span/text()'))

                # if is_shot:
                #     author = format.split(' von ')[-1]
                # else:
                author = ''.join(data.xpath('.//div[@class="title"]/span[@class="ptBrand"]/text()'))
                author = author.split('by ')[-1]
                if author.startswith('by '):
                    author = author[3:]

                counter -= 1
@@ -47,6 +47,9 @@ def get_parser(usage):

def get_db(dbpath, options):
    if options.library_path is not None:
        dbpath = options.library_path
    if dbpath is None:
        raise ValueError('No saved library path, either run the GUI or use the'
                ' --with-library option')
    dbpath = os.path.abspath(dbpath)
    return LibraryDatabase2(dbpath)