Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit 5046c5bf5a: Sync to trunk.
recipes/cosmopolitan_uk.recipe (new file, 51 lines)
@@ -0,0 +1,51 @@

import re
from calibre.web.feeds.news import BasicNewsRecipe
#from calibre import __appname__
from calibre.utils.magick import Image

class AdvancedUserRecipe1306097511(BasicNewsRecipe):
    title = u'Cosmopolitan UK'
    description = 'Fashion, beauty and Gossip for women from COSMOPOLITAN -UK'

    __author__ = 'Dave Asbury'
    # greyscale code by Starson
    cover_url = 'http://www.cosmopolitan.magazine.co.uk/files/4613/2085/8988/Cosmo_Cover3.jpg'
    no_stylesheets = True
    oldest_article = 7
    max_articles_per_feed = 20
    remove_empty_feeds = True
    remove_javascript = True

    preprocess_regexps = [
        (re.compile(r'<!-- Begin tmpl module_competition_offer -->.*?<!-- End tmpl module_competition_offer-->', re.IGNORECASE | re.DOTALL), lambda match: '')]

    language = 'en_GB'

    masthead_url = 'http://www.cosmopolitan.co.uk/cm/cosmopolitanuk/site_images/header/cosmouk_logo_home.gif'

    keep_only_tags = [
        dict(attrs={'class' : ['dateAuthor', 'publishDate']}),
        dict(name='div', attrs={'id' : ['main_content']})
    ]

    remove_tags = [
        dict(name='div', attrs={'class' : ['blogInfo','viral_toolbar','comment_number','prevEntry nav']}),
        dict(name='div', attrs={'class' : 'blog_module_about_the_authors'}),
        dict(attrs={'id': ['breadcrumbs','comment','related_links_list','right_rail','content_sec_fb_more','content_sec_mostpopularstories','content-sec_fb_frame_viewfb_bot']}),
        dict(attrs={'class' : ['read_liked_that_header','fb_back_next_area']})
    ]

    feeds = [(u'Love & Sex', u'http://www.cosmopolitan.co.uk/love-sex/rss/'), (u'Men', u'http://cosmopolitan.co.uk/men/rss/'), (u'Fashion', u'http://cosmopolitan.co.uk/fashion/rss/'), (u'Hair & Beauty', u'http://cosmopolitan.co.uk/beauty-hair/rss/'), (u'LifeStyle', u'http://cosmopolitan.co.uk/lifestyle/rss/'), (u'Cosmo On Campus', u'http://cosmopolitan.co.uk/campus/rss/'), (u'Celebrity Gossip', u'http://cosmopolitan.co.uk/celebrity-gossip/rss/')]

    def postprocess_html(self, soup, first):
        # process all the images
        for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and tag.has_key('src')):
            iurl = tag['src']
            img = Image()
            img.open(iurl)
            if img < 0:
                raise RuntimeError('Out of memory')
            img.type = "GrayscaleType"
            img.save(iurl)
        return soup
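Note on the recipe above: the preprocess_regexps entry strips the site's competition-offer block from every downloaded page. A minimal, self-contained sketch of how such a (pattern, replacement) pair is applied to HTML (illustrative only; calibre applies these automatically while fetching):

    import re

    competition_offer = (
        re.compile(r'<!-- Begin tmpl module_competition_offer -->.*?'
                   r'<!-- End tmpl module_competition_offer-->',
                   re.IGNORECASE | re.DOTALL),
        lambda match: '')

    html = ('<p>Article text</p>'
            '<!-- Begin tmpl module_competition_offer -->promo box'
            '<!-- End tmpl module_competition_offer--><p>More text</p>')

    pattern, repl = competition_offer
    # Every occurrence of the matched block is replaced by the empty string.
    print(pattern.sub(repl, html))  # -> <p>Article text</p><p>More text</p>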
recipes/daily_writing_tips.recipe (new file, 18 lines)
@@ -0,0 +1,18 @@

from calibre.web.feeds.news import BasicNewsRecipe

class DailyWritingTips(BasicNewsRecipe):
    title = u'Daily Writing Tips'
    language = 'en_GB'
    __author__ = 'NotTaken'
    oldest_article = 7 #days
    max_articles_per_feed = 40
    use_embedded_content = True
    no_stylesheets = True
    auto_cleanup = False
    encoding = 'utf-8'

    feeds = [
        ('Latest tips',
         'http://feeds2.feedburner.com/DailyWritingTips'),
        ]
recipes/icons/skylife.png (new binary file, 3.3 KiB; contents not shown)
@@ -29,22 +29,7 @@ class RollingStones(BasicNewsRecipe):
     max_articles_per_feed = 25
     use_embedded_content = False
     no_stylesheets = True
+    auto_cleanup = True
-    remove_javascript = True
-
-    #####################################################################################
-    # cleanup section #
-    #####################################################################################
-    keep_only_tags = [
-            dict(name='div', attrs={'class':['c65l']}),
-            dict(name='div', attrs={'id':['col1']}),
-           ]
-    remove_tags = [
-            dict(name='div', attrs={'class': ['storyActions upper','storyActions lowerArticleNav']}),
-            dict(name='div', attrs={'id': ['comments','related']}),
-           ]
-
     feeds = [
         (u'News', u'http://www.rollingstone.com/siteServices/rss/allNews'),
@@ -58,25 +43,7 @@ class RollingStones(BasicNewsRecipe):

-    def get_article_url(self, article):
-        return article.get('guid', None)
+    def print_version(self, url):
+        return url +'?print=true'

-    def append_page(self, soup, appendtag, position):
-        '''
-        Some are the articles are multipage so the below function
-        will get the articles that have <next>
-        '''
-        pager = soup.find('li',attrs={'class':'next'})
-        if pager:
-            nexturl = pager.a['href']
-            soup2 = self.index_to_soup(nexturl)
-            texttag = soup2.find('div', attrs={'id':'storyTextContainer'})
-            for it in texttag.findAll(style=True):
-                del it['style']
-            newpos = len(texttag.contents)
-            self.append_page(soup2,texttag,newpos)
-            texttag.extract()
-            appendtag.insert(position,texttag)
recipes/skylife.recipe (new file, 32 lines)
@@ -0,0 +1,32 @@

# -*- coding: utf-8 -*-

from calibre.web.feeds.news import BasicNewsRecipe

class THY (BasicNewsRecipe):

    title = u'Skylife'
    __author__ = u'thomass'
    description = ' Türk Hava Yollarının yayınladığı aylık kültür dergisi (Fotoğrafları da içermesini isterseniz keep_only_tag''da belirttiğim kodu da ekleyin) '
    oldest_article =32
    max_articles_per_feed =100
    no_stylesheets = True
    #delay = 1
    #use_embedded_content = False
    encoding = 'utf-8'
    publisher = 'thomass'
    category = 'genel kültür, gezi,Türkçe'
    language = 'tr'
    publication_type = 'magazine'

    conversion_options = {
        'comment'   : description
        , 'tags'     : category
        , 'publisher' : publisher
        , 'language' : language
    }

    keep_only_tags = [dict(name='h3', attrs={'id':['hpbaslik']}),dict(name='p', attrs={'id':['pyayin','hspot','picerik']})] #Fotoğrafları da eklemek için: dict(name='div', attrs={'id':['divResimler']})
    masthead_url = 'http://www.turkishairlines.com/static/img/skylife/logo.png'
    remove_empty_feeds= True
    remove_attributes = ['width','height']

    feeds = [( u'SKYLIFE', u'http://feed43.com/7783278414103376.xml')]
recipes/techdirt.recipe (new file, 20 lines)
@@ -0,0 +1,20 @@

from calibre.web.feeds.news import BasicNewsRecipe

class TechDirt(BasicNewsRecipe):
    title = u'Tech Dirt'
    language = 'en'
    __author__ = 'Krittika Goyal'
    oldest_article = 7 #days
    max_articles_per_feed = 25
    use_embedded_content = False

    no_stylesheets = True
    auto_cleanup = True
    encoding = 'latin1'

    feeds = [
        ('News',
         'http://feeds.feedburner.com/techdirt/feed'),
        ]
@@ -219,7 +219,7 @@ per_language_title_sort_articles = {
             r'Una\s+', r'Unos\s+', r'Unas\s+'),
     # French
     'fra' : (r'Le\s+', r'La\s+', r"L'", r'Les\s+', r'Un\s+', r'Une\s+',
-            r'Des\s+'),
+            r'Des\s+', r'De\s+La\s+', r'De\s+', r"D'"),
     # Italian
     'ita' : (r'Lo\s+', r'Il\s+', r"L'", r'La\s+', r'Gli\s+', r'I\s+',
             r'Le\s+', ),
@@ -230,7 +230,8 @@ per_language_title_sort_articles = {
     'ron' : (r'Un\s+', r'O\s+', r'Nişte\s+', ),
     # German
     'deu' : (r'Der\s+', r'Die\s+', r'Das\s+', r'Den\s+', r'Ein\s+',
-            r'Eine\s+', r'Einen\s+', ),
+            r'Eine\s+', r'Einen\s+', r'Dem\s+', r'Des\s+', r'Einem\s+',
+            r'Eines\s+'),
     # Dutch
     'nld' : (r'De\s+', r'Het\s+', r'Een\s+', r"'n\s+", r"'s\s+", r'Ene\s+',
             r'Ener\s+', r'Enes\s+', r'Den\s+', r'Der\s+', r'Des\s+',
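Note: tuples like the French and German ones extended above list leading articles that should be ignored when sorting titles. A minimal sketch of how such a tuple can be applied when building a sort key (illustrative only; calibre's actual title_sort() differs in details):

    import re

    fra_articles = (r'Le\s+', r'La\s+', r"L'", r'Les\s+', r'Un\s+', r'Une\s+',
                    r'Des\s+', r'De\s+La\s+', r'De\s+', r"D'")

    def strip_leading_article(title, articles):
        # Try each pattern at the start of the title; drop the first one that matches.
        for pat in articles:
            m = re.match(pat, title, re.IGNORECASE)
            if m:
                return title[m.end():]
        return title

    print(strip_leading_article(u'Les Misérables', fra_articles))         # Misérables
    print(strip_leading_article(u'De la Terre à la Lune', fra_articles))  # Terre à la Lune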
@@ -449,7 +449,7 @@ class CatalogPlugin(Plugin): # {{{
             ['author_sort','authors','comments','cover','formats',
              'id','isbn','ondevice','pubdate','publisher','rating',
              'series_index','series','size','tags','timestamp',
-             'title_sort','title','uuid'])
+             'title_sort','title','uuid','languages'])
         all_custom_fields = set(db.custom_field_keys())
         all_fields = all_std_fields.union(all_custom_fields)
@@ -167,12 +167,12 @@ class ANDROID(USBMS):
             'MB525', 'ANDROID2.3', 'SGH-I997', 'GT-I5800_CARD', 'MB612',
             'GT-S5830_CARD', 'GT-S5570_CARD', 'MB870', 'MID7015A',
             'ALPANDIGITAL', 'ANDROID_MID', 'VTAB1008', 'EMX51_BBG_ANDROI',
-            'UMS', '.K080', 'P990', 'LTE']
+            'UMS', '.K080', 'P990', 'LTE', 'MB853']
     WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
             'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
             'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
             '__UMS_COMPOSITE', 'SGH-I997_CARD', 'MB870', 'ALPANDIGITAL',
-            'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD']
+            'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853']

     OSX_MAIN_MEM = 'Android Device Main Memory'
@@ -1306,7 +1306,8 @@ class ITUNES(DriverBase):
         if DEBUG:
             self.log.info(" ITUNES._add_new_copy()")

-        self._update_epub_metadata(fpath, metadata)
+        if fpath.rpartition('.')[2].lower() == 'epub':
+            self._update_epub_metadata(fpath, metadata)

         db_added = None
         lb_added = None
@@ -175,7 +175,7 @@ class ODYSSEY(N516):

    FORMATS = ['epub', 'fb2', 'html', 'pdf', 'txt']

-   EBOOK_DIR_MAIN = 'calibre'
+   EBOOK_DIR_MAIN = 'Digital Editions'

    def get_main_ebook_dir(self, for_upload=False):
        if for_upload:
@@ -132,9 +132,11 @@ class EPUBOutput(OutputFormatPlugin):

     def upshift_markup(self): # {{{
         'Upgrade markup to comply with XHTML 1.1 where possible'
-        from calibre.ebooks.oeb.base import XPath
+        from calibre.ebooks.oeb.base import XPath, XML
         for x in self.oeb.spine:
             root = x.data
+            if (not root.get(XML('lang'))) and (root.get('lang')):
+                root.set(XML('lang'), root.get('lang'))
             body = XPath('//h:body')(root)
             if body:
                 body = body[0]
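Note: the two added lines mirror a plain lang attribute into xml:lang on each spine document, which XHTML 1.1 expects. The same operation written against plain lxml (a sketch; calibre's XML() helper is assumed here to simply build the standard xml-namespace attribute name):

    from lxml import etree

    XML_LANG = '{http://www.w3.org/XML/1998/namespace}lang'  # i.e. xml:lang

    root = etree.fromstring('<html lang="en"><body/></html>')
    # Copy lang -> xml:lang only when xml:lang is missing.
    if not root.get(XML_LANG) and root.get('lang'):
        root.set(XML_LANG, root.get('lang'))
    print(etree.tostring(root))  # <html lang="en" xml:lang="en"><body/></html>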
@@ -121,7 +121,18 @@ def cap_author_token(token):
         # Normalize tokens of the form J.K. to J. K.
         parts = token.split('.')
         return '. '.join(map(capitalize, parts)).strip()
+    scots_name = None
+    for x in ('mc', 'mac'):
+        if (token.lower().startswith(x) and len(token) > len(x) and
+                (
+                    token[len(x)] == upper(token[len(x)]) or
+                    lt == token
+                )):
+            scots_name = len(x)
+            break
     ans = capitalize(token)
+    if scots_name is not None:
+        ans = ans[:scots_name] + upper(ans[scots_name]) + ans[scots_name+1:]
     for x in ('-', "'"):
         idx = ans.find(x)
         if idx > -1 and len(ans) > idx+2:
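Note: the added branch detects Mc/Mac surnames so the letter after the prefix stays capitalised. A simplified sketch of the intended effect (illustrative only; the real cap_author_token() also consults the upper() and lt helpers from its module and covers more edge cases):

    def cap_scots(token):
        # Simplified: 'mcdonald' -> 'McDonald', 'macleod' -> 'MacLeod'.
        for prefix in ('mc', 'mac'):
            if token.lower().startswith(prefix) and len(token) > len(prefix):
                rest = token[len(prefix):]
                return prefix.capitalize() + rest[0].upper() + rest[1:].lower()
        return token.capitalize()

    print(cap_scots('mcdonald'))  # McDonald
    print(cap_scots('macleod'))   # MacLeod
    print(cap_scots('smith'))     # Smith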
@@ -305,7 +305,8 @@ class ISBNMerge(object):
                 ans.pubdate = r.pubdate
                 break
         if getattr(ans.pubdate, 'year', None) == min_year:
-            min_date = datetime(min_year, ans.pubdate.month, ans.pubdate.day)
+            min_date = datetime(min_year, ans.pubdate.month, ans.pubdate.day,
+                    tzinfo=utc_tz)
         else:
             min_date = datetime(min_year, 1, 2, tzinfo=utc_tz)
         ans.pubdate = min_date
@@ -164,7 +164,14 @@ class MyBlockingBusy(QDialog): # {{{
                 self.db.set_title(id, titlecase(title), notify=False)
             if do_title_sort:
                 title = self.db.title(id, index_is_id=True)
-                self.db.set_title_sort(id, title_sort(title), notify=False)
+                if languages:
+                    lang = languages[0]
+                else:
+                    lang = self.db.languages(id, index_is_id=True)
+                    if lang:
+                        lang = lang.partition(',')[0]
+                self.db.set_title_sort(id, title_sort(title, lang=lang),
+                        notify=False)
             if au:
                 self.db.set_authors(id, string_to_authors(au), notify=False)
             if cover_action == 'remove':
@@ -8,4 +8,3 @@ __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
 __docformat__ = 'restructuredtext en'

-
@@ -28,7 +28,8 @@ from calibre.ebooks.metadata.sources.identify import (identify,
                                                        urls_from_identifiers)
 from calibre.ebooks.metadata.book.base import Metadata
 from calibre.gui2 import error_dialog, NONE
-from calibre.utils.date import utcnow, fromordinal, format_date
+from calibre.utils.date import (utcnow, fromordinal, format_date,
+        UNDEFINED_DATE, as_utc)
 from calibre.library.comments import comments_to_html
 from calibre import force_unicode
 # }}}
@@ -201,7 +202,12 @@ class ResultsModel(QAbstractTableModel): # {{{
             elif col == 1:
                 key = attrgetter('title')
             elif col == 2:
-                key = attrgetter('pubdate')
+                def dategetter(x):
+                    x = getattr(x, 'pubdate', None)
+                    if x is None:
+                        x = UNDEFINED_DATE
+                    return as_utc(x)
+                key = dategetter
             elif col == 3:
                 key = attrgetter('has_cached_cover_url')
             elif key == 4:
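Note: the dategetter added above keeps the pubdate column sortable when some results carry no publication date. The same idea with plain datetimes (a sketch; UNDEFINED_DATE and as_utc are calibre helpers, and the sentinel value below is only an assumption for illustration):

    from datetime import datetime

    UNDEFINED_DATE = datetime(101, 1, 1)  # assumed stand-in for calibre's sentinel

    class Result(object):
        def __init__(self, title, pubdate=None):
            self.title = title
            self.pubdate = pubdate

    def datekey(r):
        # Missing dates sort as "very old" instead of raising on None.
        d = getattr(r, 'pubdate', None)
        return d if d is not None else UNDEFINED_DATE

    results = [Result('B', datetime(2011, 5, 1)), Result('A'),
               Result('C', datetime(2010, 1, 1))]
    print([r.title for r in sorted(results, key=datekey)])  # ['A', 'C', 'B']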
@@ -22,7 +22,8 @@ from calibre.gui2.store.stores.mobileread.store_dialog import MobileReadStoreDia

 class MobileReadStore(BasicStoreConfig, StorePlugin):

-    def genesis(self):
+    def __init__(self, *args, **kwargs):
+        StorePlugin.__init__(self, *args, **kwargs)
         self.lock = Lock()

     def open(self, parent=None, detail_item=None, external=False):
|
|||||||
book.drm = SearchResult.DRM_UNLOCKED
|
book.drm = SearchResult.DRM_UNLOCKED
|
||||||
yield book
|
yield book
|
||||||
|
|
||||||
def update_cache(self, parent=None, timeout=10, force=False, suppress_progress=False):
|
def update_cache(self, parent=None, timeout=10, force=False,
|
||||||
|
suppress_progress=False):
|
||||||
if self.lock.acquire(False):
|
if self.lock.acquire(False):
|
||||||
try:
|
try:
|
||||||
update_thread = CacheUpdateThread(self.config, self.seralize_books, timeout)
|
update_thread = CacheUpdateThread(self.config, self.seralize_books, timeout)
|
||||||
|
@ -195,7 +195,8 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, EmailMixin, # {{{
|
|||||||
|
|
||||||
for ac in self.iactions.values():
|
for ac in self.iactions.values():
|
||||||
ac.do_genesis()
|
ac.do_genesis()
|
||||||
self.donate_action = QAction(QIcon(I('donate.png')), _('&Donate to support calibre'), self)
|
self.donate_action = QAction(QIcon(I('donate.png')),
|
||||||
|
_('&Donate to support calibre'), self)
|
||||||
for st in self.istores.values():
|
for st in self.istores.values():
|
||||||
st.do_genesis()
|
st.do_genesis()
|
||||||
MainWindowMixin.__init__(self, db)
|
MainWindowMixin.__init__(self, db)
|
||||||
|
@ -29,7 +29,8 @@ from calibre.utils.zipfile import ZipFile
|
|||||||
|
|
||||||
FIELDS = ['all', 'title', 'title_sort', 'author_sort', 'authors', 'comments',
|
FIELDS = ['all', 'title', 'title_sort', 'author_sort', 'authors', 'comments',
|
||||||
'cover', 'formats','id', 'isbn', 'ondevice', 'pubdate', 'publisher',
|
'cover', 'formats','id', 'isbn', 'ondevice', 'pubdate', 'publisher',
|
||||||
'rating', 'series_index', 'series', 'size', 'tags', 'timestamp', 'uuid']
|
'rating', 'series_index', 'series', 'size', 'tags', 'timestamp',
|
||||||
|
'uuid', 'languages']
|
||||||
|
|
||||||
#Allowed fields for template
|
#Allowed fields for template
|
||||||
TEMPLATE_ALLOWED_FIELDS = [ 'author_sort', 'authors', 'id', 'isbn', 'pubdate', 'title_sort',
|
TEMPLATE_ALLOWED_FIELDS = [ 'author_sort', 'authors', 'id', 'isbn', 'pubdate', 'title_sort',
|
||||||
@ -601,7 +602,7 @@ class BIBTEX(CatalogPlugin): # {{{
|
|||||||
bibtexc, db, citation_bibtex, addfiles_bibtex))
|
bibtexc, db, citation_bibtex, addfiles_bibtex))
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
class EPUB_MOBI(CatalogPlugin):
|
class EPUB_MOBI(CatalogPlugin): # {{{
|
||||||
'ePub catalog generator'
|
'ePub catalog generator'
|
||||||
|
|
||||||
Option = namedtuple('Option', 'option, default, dest, action, help')
|
Option = namedtuple('Option', 'option, default, dest, action, help')
|
||||||
@ -5177,3 +5178,4 @@ Author '{0}':
|
|||||||
|
|
||||||
# returns to gui2.actions.catalog:catalog_generated()
|
# returns to gui2.actions.catalog:catalog_generated()
|
||||||
return catalog.error
|
return catalog.error
|
||||||
|
# }}}
|
||||||
|
@ -3378,7 +3378,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
|
|||||||
prefix = self.library_path
|
prefix = self.library_path
|
||||||
FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher', 'rating',
|
FIELDS = set(['title', 'sort', 'authors', 'author_sort', 'publisher', 'rating',
|
||||||
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
|
'timestamp', 'size', 'tags', 'comments', 'series', 'series_index',
|
||||||
'uuid', 'pubdate', 'last_modified', 'identifiers'])
|
'uuid', 'pubdate', 'last_modified', 'identifiers', 'languages'])
|
||||||
for x in self.custom_column_num_map:
|
for x in self.custom_column_num_map:
|
||||||
FIELDS.add(x)
|
FIELDS.add(x)
|
||||||
data = []
|
data = []
|
||||||
|
@ -195,14 +195,25 @@ class ContentServer(object):
|
|||||||
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def get_format(self, id, format):
|
def get_format(self, id, format):
|
||||||
format = format.upper()
|
format = format.upper()
|
||||||
|
fm = self.db.format_metadata(id, format, allow_cache=False)
|
||||||
|
if not fm:
|
||||||
|
raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
|
||||||
|
mi = newmi = self.db.get_metadata(id, index_is_id=True)
|
||||||
|
|
||||||
|
cherrypy.response.headers['Last-Modified'] = \
|
||||||
|
self.last_modified(max(fm['mtime'], mi.last_modified))
|
||||||
|
|
||||||
fmt = self.db.format(id, format, index_is_id=True, as_file=True,
|
fmt = self.db.format(id, format, index_is_id=True, as_file=True,
|
||||||
mode='rb')
|
mode='rb')
|
||||||
if fmt is None:
|
if fmt is None:
|
||||||
raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
|
raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
|
||||||
mi = newmi = self.db.get_metadata(id, index_is_id=True)
|
mt = guess_type('dummy.'+format.lower())[0]
|
||||||
|
if mt is None:
|
||||||
|
mt = 'application/octet-stream'
|
||||||
|
cherrypy.response.headers['Content-Type'] = mt
|
||||||
|
|
||||||
if format == 'EPUB':
|
if format == 'EPUB':
|
||||||
# Get the original metadata
|
# Get the original metadata
|
||||||
|
|
||||||
@ -221,19 +232,19 @@ class ContentServer(object):
|
|||||||
set_metadata(fmt, newmi, format.lower())
|
set_metadata(fmt, newmi, format.lower())
|
||||||
fmt.seek(0)
|
fmt.seek(0)
|
||||||
|
|
||||||
mt = guess_type('dummy.'+format.lower())[0]
|
fmt.seek(0, 2)
|
||||||
if mt is None:
|
cherrypy.response.headers['Content-Length'] = fmt.tell()
|
||||||
mt = 'application/octet-stream'
|
fmt.seek(0)
|
||||||
au = authors_to_string(mi.authors if mi.authors else [_('Unknown')])
|
|
||||||
title = mi.title if mi.title else _('Unknown')
|
au = authors_to_string(newmi.authors if newmi.authors else
|
||||||
|
[_('Unknown')])
|
||||||
|
title = newmi.title if newmi.title else _('Unknown')
|
||||||
fname = u'%s - %s_%s.%s'%(title[:30], au[:30], id, format.lower())
|
fname = u'%s - %s_%s.%s'%(title[:30], au[:30], id, format.lower())
|
||||||
fname = ascii_filename(fname).replace('"', '_')
|
fname = ascii_filename(fname).replace('"', '_')
|
||||||
cherrypy.response.headers['Content-Type'] = mt
|
|
||||||
cherrypy.response.headers['Content-Disposition'] = \
|
cherrypy.response.headers['Content-Disposition'] = \
|
||||||
b'attachment; filename="%s"'%fname
|
b'attachment; filename="%s"'%fname
|
||||||
|
cherrypy.response.body = fmt
|
||||||
cherrypy.response.timeout = 3600
|
cherrypy.response.timeout = 3600
|
||||||
cherrypy.response.headers['Last-Modified'] = \
|
|
||||||
self.last_modified(self.db.format_last_modified(id, format))
|
|
||||||
return fmt
|
return fmt
|
||||||
# }}}
|
# }}}
|
||||||
|
|
||||||
|
@ -265,7 +265,7 @@ How do I use |app| with my Android phone/tablet?
|
|||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
There are two ways that you can connect your Android device to calibre. Using a USB cable-- or wirelessly, over the air.
|
There are two ways that you can connect your Android device to calibre. Using a USB cable-- or wirelessly, over the air.
|
||||||
The USB cable method only works if your Android device can act as a USB disk, which some Android tablets cannot.
|
**The USB cable method only works if your Android device can act as a USB disk, that means in windows it must have a drive letter, like K:**.
|
||||||
|
|
||||||
Using a USB cable
|
Using a USB cable
|
||||||
^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^
|
||||||
|
@ -13,7 +13,7 @@ from datetime import timedelta
|
|||||||
from lxml import etree
|
from lxml import etree
|
||||||
from lxml.builder import ElementMaker
|
from lxml.builder import ElementMaker
|
||||||
|
|
||||||
from calibre import browser
|
from calibre import browser, force_unicode
|
||||||
from calibre.utils.date import parse_date, now as nowf, utcnow, tzlocal, \
|
from calibre.utils.date import parse_date, now as nowf, utcnow, tzlocal, \
|
||||||
isoformat, fromordinal
|
isoformat, fromordinal
|
||||||
|
|
||||||
@ -66,8 +66,9 @@ def serialize_collection(mapping_of_recipe_classes):
|
|||||||
x.title.decode('ascii')
|
x.title.decode('ascii')
|
||||||
'''
|
'''
|
||||||
for urn in sorted(mapping_of_recipe_classes.keys(),
|
for urn in sorted(mapping_of_recipe_classes.keys(),
|
||||||
key=lambda key: getattr(mapping_of_recipe_classes[key], 'title',
|
key=lambda key: force_unicode(
|
||||||
'zzz')):
|
getattr(mapping_of_recipe_classes[key], 'title', 'zzz'),
|
||||||
|
'utf-8')):
|
||||||
recipe = serialize_recipe(urn, mapping_of_recipe_classes[urn])
|
recipe = serialize_recipe(urn, mapping_of_recipe_classes[urn])
|
||||||
collection.append(recipe)
|
collection.append(recipe)
|
||||||
collection.set('count', str(len(collection)))
|
collection.set('count', str(len(collection)))
|
||||||
|