commit 3728b83b07
Merge from trunk
@@ -19,6 +19,66 @@
# new recipes:
#   - title:

- version: 0.8.7
  date: 2011-06-24

  new features:
    - title: "Connect to iTunes: You now need to tell iTunes to keep its own copy of every ebook. Do this in iTunes by going to Preferences->Advanced and setting the 'Copy files to iTunes Media folder when adding to library' option. To learn why this is necessary, see: http://www.mobileread.com/forums/showthread.php?t=140260"
      type: major

    - title: "Add a couple of date-related functions to the calibre template language, to get today's date and to create text based on the value of a date type field"

    - title: "Improved reading of metadata from FB2 files, with support for reading ISBNs, tags, published date, etc."

    - title: "Driver for the Imagine IMEB5"
      tickets: [800642]

    - title: "Show the currently used network proxies in Preferences->Miscellaneous"

    - title: "Kobo Touch driver: Show Favorites as a device collection. Various other minor fixes."

    - title: "Content server now sends the Content-Disposition header when sending ebook files."

    - title: "Allow search and replace on comments custom columns."

    - title: "Add a new action 'Quick View' to show the books in your library by the author/tags/series/etc. of the currently selected book, in a separate window. You can add it to your toolbar or right click menu by going to Preferences->Toolbars."

    - title: "Get Books: Add libri.de as a book source. Fix a bug that caused some book downloads to fail. Fixes to the Legimi and beam-ebooks.de stores"
      tickets: [799367]

  bug fixes:
    - title: "Fix a memory leak that could result in the leaking of several MB of memory with large libraries"
      tickets: [800952]

    - title: "Fix the read metadata from format button in the edit metadata dialog using the wrong timezone when setting published date"
      tickets: [799777]

    - title: "Generating catalog: Fix occasional file-in-use errors when generating catalogs on Windows"

    - title: "Fix clicking on News in the Tag Browser not working in non-English locales."
      tickets: [799471]

    - title: "HTML Input: Fix a regression in 0.8.6 that caused CSS stylesheets to be ignored"
      tickets: [799171]

    - title: "Fix a regression that caused restore database to stop working on some Windows systems"

    - title: "EPUB Output: Convert <br> tags with text in them into <div> tags, as ADE cannot handle them."
      tickets: [794427]

  improved recipes:
    - Le Temps
    - Perfil
    - Financial Times UK

  new recipes:
    - title: "Daytona Beach Journal"
      author: BRGriff

    - title: "El club del ebook and Frontline"
      author: Darko Miletic


- version: 0.8.6
  date: 2011-06-17
@@ -1,32 +1,41 @@
#!/usr/bin/env python

__license__   = 'GPL v3'
__copyright__ = '2008, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2010-2011, Darko Miletic <darko.miletic at gmail.com>'
'''
ft.com
www.ft.com
'''

import datetime
from calibre.web.feeds.news import BasicNewsRecipe

class FinancialTimes(BasicNewsRecipe):
    title = u'Financial Times'
    __author__ = 'Darko Miletic and Sujata Raman'
    description = ('Financial world news. Available after 5AM '
                   'GMT, daily.')
class FinancialTimes_rss(BasicNewsRecipe):
    title = 'Financial Times'
    __author__ = 'Darko Miletic'
    description = "The Financial Times (FT) is one of the world's leading business news and information organisations, recognised internationally for its authority, integrity and accuracy."
    publisher = 'The Financial Times Ltd.'
    category = 'news, finances, politics, World'
    oldest_article = 2
    language = 'en'

    max_articles_per_feed = 100
    language = 'en'
    max_articles_per_feed = 250
    no_stylesheets = True
    use_embedded_content = False
    needs_subscription = True
    simultaneous_downloads= 1
    delay = 1
    encoding = 'utf8'
    publication_type = 'newspaper'
    masthead_url = 'http://im.media.ft.com/m/img/masthead_main.jpg'
    LOGIN = 'https://registration.ft.com/registration/barrier/login'
    INDEX = 'http://www.ft.com'

    LOGIN = 'https://registration.ft.com/registration/barrier/login'
    conversion_options = {
                          'comment'          : description
                        , 'tags'             : category
                        , 'publisher'        : publisher
                        , 'language'         : language
                        , 'linearize_tables' : True
                        }

    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        br.open(self.INDEX)
        if self.username is not None and self.password is not None:
            br.open(self.LOGIN)
            br.select_form(name='loginForm')
@@ -35,31 +44,63 @@ class FinancialTimes(BasicNewsRecipe):
            br.submit()
        return br

    keep_only_tags = [ dict(name='div', attrs={'id':'cont'}) ]
    remove_tags_after = dict(name='p', attrs={'class':'copyright'})
    keep_only_tags = [dict(name='div', attrs={'class':['fullstory fullstoryHeader','fullstory fullstoryBody','ft-story-header','ft-story-body','index-detail']})]
    remove_tags = [
                     dict(name='div', attrs={'id':'floating-con'})
                    ,dict(name=['meta','iframe','base','object','embed','link'])
                    ,dict(attrs={'class':['storyTools','story-package','screen-copy','story-package separator','expandable-image']})
                  ]
    remove_attributes = ['width','height','lang']

    extra_css = '''
                body{font-family:Arial,Helvetica,sans-serif;}
                h2(font-size:large;}
                .ft-story-header(font-size:xx-small;}
                .ft-story-body(font-size:small;}
                a{color:#003399;}
                '''
    extra_css = """
                body{font-family: Georgia,Times,"Times New Roman",serif}
                h2{font-size:large}
                .ft-story-header{font-size: x-small}
                .container{font-size:x-small;}
                h3{font-size:x-small;color:#003399;}
                .copyright{font-size: x-small}
                img{margin-top: 0.8em; display: block}
                .lastUpdated{font-family: Arial,Helvetica,sans-serif; font-size: x-small}
                .byline,.ft-story-body,.ft-story-header{font-family: Arial,Helvetica,sans-serif}
                """

    feeds = [
              (u'UK'         , u'http://www.ft.com/rss/home/uk'        )
             ,(u'US'         , u'http://www.ft.com/rss/home/us'        )
             ,(u'Europe'     , u'http://www.ft.com/rss/home/europe'    )
             ,(u'Asia'       , u'http://www.ft.com/rss/home/asia'      )
             ,(u'Middle East', u'http://www.ft.com/rss/home/middleeast')
            ]

    def preprocess_html(self, soup):
        content_type = soup.find('meta', {'http-equiv':'Content-Type'})
        if content_type:
            content_type['content'] = 'text/html; charset=utf-8'
        items = ['promo-box','promo-title',
                 'promo-headline','promo-image',
                 'promo-intro','promo-link','subhead']
        for item in items:
            for it in soup.findAll(item):
                it.name = 'div'
                it.attrs = []
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                str = item.string
                item.replaceWith(str)
            else:
                if limg:
                    item.name = 'div'
                    item.attrs = []
                else:
                    str = self.tag_to_string(item)
                    item.replaceWith(str)
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup

    def get_cover_url(self):
        cdate = datetime.date.today()
        if cdate.isoweekday() == 7:
            cdate -= datetime.timedelta(days=1)
        return cdate.strftime('http://specials.ft.com/vtf_pdf/%d%m%y_FRONT1_USA.pdf')
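Note on the new get_cover_url(): both Financial Times recipes now build the cover from FT's dated front-page PDF, stepping back one day on Sundays (isoweekday() == 7) since there is no Sunday print edition. A minimal standalone sketch of that date logic, using the URL pattern from the recipe above; the edition parameter is added here purely for illustration and is not part of the recipe, which hardcodes 'USA' (and 'LON' in the UK variant below):

import datetime

def ft_cover_url(edition='USA'):
    # edition is a hypothetical parameter; the recipes hardcode 'USA' / 'LON'
    cdate = datetime.date.today()
    if cdate.isoweekday() == 7:        # Sunday: no FT print edition, fall back to Saturday
        cdate -= datetime.timedelta(days=1)
    # strftime expands %d%m%y inside the URL, e.g. 240611 for 2011-06-24
    return cdate.strftime('http://specials.ft.com/vtf_pdf/%d%m%y_FRONT1_' + edition + '.pdf')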
@@ -1,15 +1,19 @@
__license__   = 'GPL v3'
__copyright__ = '2010-2011, Darko Miletic <darko.miletic at gmail.com>'
'''
ft.com
www.ft.com/uk-edition
'''

import datetime
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class FinancialTimes(BasicNewsRecipe):
    title = u'Financial Times - UK printed edition'
    title = 'Financial Times - UK printed edition'
    __author__ = 'Darko Miletic'
    description = 'Financial world news'
    description = "The Financial Times (FT) is one of the world's leading business news and information organisations, recognised internationally for its authority, integrity and accuracy."
    publisher = 'The Financial Times Ltd.'
    category = 'news, finances, politics, UK, World'
    oldest_article = 2
    language = 'en_GB'
    max_articles_per_feed = 250
@@ -17,14 +21,23 @@ class FinancialTimes(BasicNewsRecipe):
    use_embedded_content = False
    needs_subscription = True
    encoding = 'utf8'
    simultaneous_downloads= 1
    delay = 1
    publication_type = 'newspaper'
    masthead_url = 'http://im.media.ft.com/m/img/masthead_main.jpg'
    LOGIN = 'https://registration.ft.com/registration/barrier/login'
    INDEX = 'http://www.ft.com/uk-edition'
    PREFIX = 'http://www.ft.com'

    conversion_options = {
                          'comment'          : description
                        , 'tags'             : category
                        , 'publisher'        : publisher
                        , 'language'         : language
                        , 'linearize_tables' : True
                        }

    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        br.open(self.INDEX)
        if self.username is not None and self.password is not None:
            br.open(self.LOGIN)
            br.select_form(name='loginForm')
@@ -33,29 +46,34 @@ class FinancialTimes(BasicNewsRecipe):
            br.submit()
        return br

    keep_only_tags = [ dict(name='div', attrs={'id':'cont'}) ]
    remove_tags_after = dict(name='p', attrs={'class':'copyright'})
    keep_only_tags = [dict(name='div', attrs={'class':['fullstory fullstoryHeader','fullstory fullstoryBody','ft-story-header','ft-story-body','index-detail']})]
    remove_tags = [
                     dict(name='div', attrs={'id':'floating-con'})
                    ,dict(name=['meta','iframe','base','object','embed','link'])
                    ,dict(attrs={'class':['storyTools','story-package','screen-copy','story-package separator','expandable-image']})
                  ]
    remove_attributes = ['width','height','lang']

    extra_css = """
                body{font-family:Arial,Helvetica,sans-serif;}
                h2{font-size:large;}
                .ft-story-header{font-size:xx-small;}
                .ft-story-body{font-size:small;}
                a{color:#003399;}
                body{font-family: Georgia,Times,"Times New Roman",serif}
                h2{font-size:large}
                .ft-story-header{font-size: x-small}
                .container{font-size:x-small;}
                h3{font-size:x-small;color:#003399;}
                .copyright{font-size: x-small}
                img{margin-top: 0.8em; display: block}
                .lastUpdated{font-family: Arial,Helvetica,sans-serif; font-size: x-small}
                .byline,.ft-story-body,.ft-story-header{font-family: Arial,Helvetica,sans-serif}
                """

    def get_artlinks(self, elem):
        articles = []
        for item in elem.findAll('a',href=True):
            url = self.PREFIX + item['href']
            rawlink = item['href']
            if rawlink.startswith('http://'):
                url = rawlink
            else:
                url = self.PREFIX + rawlink
            title = self.tag_to_string(item)
            date = strftime(self.timefmt)
            articles.append({
@@ -65,7 +83,7 @@ class FinancialTimes(BasicNewsRecipe):
                             ,'description':''
                            })
        return articles

    def parse_index(self):
        feeds = []
        soup = self.index_to_soup(self.INDEX)
@@ -80,11 +98,41 @@ class FinancialTimes(BasicNewsRecipe):
            strest.insert(0,st)
            for item in strest:
                ftitle = self.tag_to_string(item)
                self.report_progress(0, _('Fetching feed')+' %s...'%(ftitle))
                feedarts = self.get_artlinks(item.parent.ul)
                feeds.append((ftitle,feedarts))
        return feeds

    def preprocess_html(self, soup):
        return self.adeify_images(soup)
        items = ['promo-box','promo-title',
                 'promo-headline','promo-image',
                 'promo-intro','promo-link','subhead']
        for item in items:
            for it in soup.findAll(item):
                it.name = 'div'
                it.attrs = []
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            limg = item.find('img')
            if item.string is not None:
                str = item.string
                item.replaceWith(str)
            else:
                if limg:
                    item.name = 'div'
                    item.attrs = []
                else:
                    str = self.tag_to_string(item)
                    item.replaceWith(str)
        for item in soup.findAll('img'):
            if not item.has_key('alt'):
                item['alt'] = 'image'
        return soup

    def get_cover_url(self):
        cdate = datetime.date.today()
        if cdate.isoweekday() == 7:
            cdate -= datetime.timedelta(days=1)
        return cdate.strftime('http://specials.ft.com/vtf_pdf/%d%m%y_FRONT1_LON.pdf')
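Note on the new get_artlinks(): it normalizes links by hand, keeping a raw href that already starts with 'http://' and prepending self.PREFIX to anything else. A sketch of the same normalization with the standard library's urljoin, shown only as an equivalent (it is not what the recipe uses; urljoin additionally handles https:// and protocol-relative links, which the startswith('http://') test does not):

try:                                    # Python 3
    from urllib.parse import urljoin
except ImportError:                     # Python 2, the era of this recipe
    from urlparse import urljoin

PREFIX = 'http://www.ft.com'            # mirrors the recipe's PREFIX constant

def normalize(rawlink):
    # Absolute URLs pass through unchanged; relative ones resolve against PREFIX.
    return urljoin(PREFIX + '/', rawlink)

# normalize('/cms/s/0/story.html') -> 'http://www.ft.com/cms/s/0/story.html'
# normalize('http://www.ft.com/a') -> 'http://www.ft.com/a'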
BIN recipes/icons/financial_times.png (new file; binary not shown; 1.4 KiB)
BIN recipes/icons/financial_times_uk.png (new file; binary not shown; 1.4 KiB)
@@ -1,17 +1,23 @@
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2010-2011, Eddie Lau'

# Region - Hong Kong, Vancouver, Toronto
__Region__ = 'Hong Kong'
# Users of Kindle 3 with limited system-level CJK support
# please replace the following "True" with "False".
__MakePeriodical__ = True
# Turn below to true if your device supports display of CJK titles
__UseChineseTitle__ = False
# Trun below to true if you wish to use life.mingpao.com as the main article source
# Set it to False if you want to skip images
__KeepImages__ = True
# (HK only) Turn below to true if you wish to use life.mingpao.com as the main article source
__UseLife__ = True


'''
Change Log:
2011/06/26: add fetching Vancouver and Toronto versions of the paper, also provide captions for images using life.mingpao fetch source
            provide options to remove all images in the file
2011/05/12: switch the main parse source to life.mingpao.com, which has more photos on the article pages
2011/03/06: add new articles for finance section, also a new section "Columns"
2011/02/28: rearrange the sections
@@ -34,21 +40,96 @@ Change Log:
import os, datetime, re
from calibre.web.feeds.recipes import BasicNewsRecipe
from contextlib import nested

from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.metadata import MetaInformation

class MPHKRecipe(BasicNewsRecipe):
    title = 'Ming Pao - Hong Kong'
# MAIN CLASS
class MPRecipe(BasicNewsRecipe):
    if __Region__ == 'Hong Kong':
        title = 'Ming Pao - Hong Kong'
        description = 'Hong Kong Chinese Newspaper (http://news.mingpao.com)'
        category = 'Chinese, News, Hong Kong'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} font>b {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://news.mingpao.com/image/portals_top_logo_news.gif'
        keep_only_tags = [dict(name='h1'),
                          dict(name='font', attrs={'style':['font-size:14pt; line-height:160%;']}), # for entertainment page title
                          dict(name='font', attrs={'color':['AA0000']}), # for column articles title
                          dict(attrs={'id':['newscontent']}), # entertainment and column page content
                          dict(attrs={'id':['newscontent01','newscontent02']}),
                          dict(attrs={'class':['photo']}),
                          dict(name='table', attrs={'width':['100%'], 'border':['0'], 'cellspacing':['5'], 'cellpadding':['0']}), # content in printed version of life.mingpao.com
                          dict(name='img', attrs={'width':['180'], 'alt':['按圖放大']}) # images for source from life.mingpao.com
                         ]
        if __KeepImages__:
            remove_tags = [dict(name='style'),
                           dict(attrs={'id':['newscontent135']}), # for the finance page from mpfinance.com
                           dict(name='font', attrs={'size':['2'], 'color':['666666']}), # article date in life.mingpao.com article
                           #dict(name='table') # for content fetched from life.mingpao.com
                          ]
        else:
            remove_tags = [dict(name='style'),
                           dict(attrs={'id':['newscontent135']}), # for the finance page from mpfinance.com
                           dict(name='font', attrs={'size':['2'], 'color':['666666']}), # article date in life.mingpao.com article
                           dict(name='img'),
                           #dict(name='table') # for content fetched from life.mingpao.com
                          ]
        remove_attributes = ['width']
        preprocess_regexps = [
                              (re.compile(r'<h5>', re.DOTALL|re.IGNORECASE),
                               lambda match: '<h1>'),
                              (re.compile(r'</h5>', re.DOTALL|re.IGNORECASE),
                               lambda match: '</h1>'),
                              (re.compile(r'<p><a href=.+?</a></p>', re.DOTALL|re.IGNORECASE), # for entertainment page
                               lambda match: ''),
                              # skip <br> after title in life.mingpao.com fetched article
                              (re.compile(r"<div id='newscontent'><br>", re.DOTALL|re.IGNORECASE),
                               lambda match: "<div id='newscontent'>"),
                              (re.compile(r"<br><br></b>", re.DOTALL|re.IGNORECASE),
                               lambda match: "</b>")
                             ]
    elif __Region__ == 'Vancouver':
        title = 'Ming Pao - Vancouver'
        description = 'Vancouver Chinese Newspaper (http://www.mingpaovan.com)'
        category = 'Chinese, News, Vancouver'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} b>font {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://www.mingpaovan.com/image/mainlogo2_VAN2.gif'
        keep_only_tags = [dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['1']}),
                          dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['3'], 'cellpadding':['3'], 'id':['tblContent3']}),
                          dict(name='table', attrs={'width':['180'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['0'], 'bgcolor':['F0F0F0']}),
                         ]
        if __KeepImages__:
            remove_tags = [dict(name='img', attrs={'src':['../../../image/magnifier.gif']})] # the magnifier icon
        else:
            remove_tags = [dict(name='img')]
        remove_attributes = ['width']
        preprocess_regexps = [(re.compile(r'&nbsp;', re.DOTALL|re.IGNORECASE),
                               lambda match: ''),
                             ]
    elif __Region__ == 'Toronto':
        title = 'Ming Pao - Toronto'
        description = 'Toronto Chinese Newspaper (http://www.mingpaotor.com)'
        category = 'Chinese, News, Toronto'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} b>font {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://www.mingpaotor.com/image/mainlogo2_TOR2.gif'
        keep_only_tags = [dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['1']}),
                          dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['3'], 'cellpadding':['3'], 'id':['tblContent3']}),
                          dict(name='table', attrs={'width':['180'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['0'], 'bgcolor':['F0F0F0']}),
                         ]
        if __KeepImages__:
            remove_tags = [dict(name='img', attrs={'src':['../../../image/magnifier.gif']})] # the magnifier icon
        else:
            remove_tags = [dict(name='img')]
        remove_attributes = ['width']
        preprocess_regexps = [(re.compile(r'&nbsp;', re.DOTALL|re.IGNORECASE),
                               lambda match: ''),
                             ]

    oldest_article = 1
    max_articles_per_feed = 100
    __author__ = 'Eddie Lau'
    description = 'Hong Kong Chinese Newspaper (http://news.mingpao.com)'
    publisher = 'MingPao'
    category = 'Chinese, News, Hong Kong'
    remove_javascript = True
    use_embedded_content = False
    no_stylesheets = True
@@ -57,33 +138,6 @@ class MPHKRecipe(BasicNewsRecipe):
    recursions = 0
    conversion_options = {'linearize_tables':True}
    timefmt = ''
    extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} font>b {font-size:200%; font-weight:bold;}'
    masthead_url = 'http://news.mingpao.com/image/portals_top_logo_news.gif'
    keep_only_tags = [dict(name='h1'),
                      dict(name='font', attrs={'style':['font-size:14pt; line-height:160%;']}), # for entertainment page title
                      dict(name='font', attrs={'color':['AA0000']}), # for column articles title
                      dict(attrs={'id':['newscontent']}), # entertainment and column page content
                      dict(attrs={'id':['newscontent01','newscontent02']}),
                      dict(attrs={'class':['photo']}),
                      dict(name='img', attrs={'width':['180'], 'alt':['按圖放大']}) # images for source from life.mingpao.com
                     ]
    remove_tags = [dict(name='style'),
                   dict(attrs={'id':['newscontent135']}), # for the finance page from mpfinance.com
                   dict(name='table')] # for content fetched from life.mingpao.com
    remove_attributes = ['width']
    preprocess_regexps = [
                          (re.compile(r'<h5>', re.DOTALL|re.IGNORECASE),
                           lambda match: '<h1>'),
                          (re.compile(r'</h5>', re.DOTALL|re.IGNORECASE),
                           lambda match: '</h1>'),
                          (re.compile(r'<p><a href=.+?</a></p>', re.DOTALL|re.IGNORECASE), # for entertainment page
                           lambda match: ''),
                          # skip <br> after title in life.mingpao.com fetched article
                          (re.compile(r"<div id='newscontent'><br>", re.DOTALL|re.IGNORECASE),
                           lambda match: "<div id='newscontent'>"),
                          (re.compile(r"<br><br></b>", re.DOTALL|re.IGNORECASE),
                           lambda match: "</b>")
                         ]

    def image_url_processor(cls, baseurl, url):
        # trick: break the url at the first occurance of digit, add an additional
@@ -124,8 +178,18 @@ class MPHKRecipe(BasicNewsRecipe):

    def get_dtlocal(self):
        dt_utc = datetime.datetime.utcnow()
        # convert UTC to local hk time - at around HKT 6.00am, all news are available
        dt_local = dt_utc - datetime.timedelta(-2.0/24)
        if __Region__ == 'Hong Kong':
            # convert UTC to local hk time - at HKT 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(8.0/24) - datetime.timedelta(4.5/24)
            # dt_local = dt_utc.astimezone(pytz.timezone('Asia/Hong_Kong')) - datetime.timedelta(4.5/24)
        elif __Region__ == 'Vancouver':
            # convert UTC to local Vancouver time - at PST time 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(-8.0/24) - datetime.timedelta(4.5/24)
            #dt_local = dt_utc.astimezone(pytz.timezone('America/Vancouver')) - datetime.timedelta(4.5/24)
        elif __Region__ == 'Toronto':
            # convert UTC to local Toronto time - at EST time 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(-5.0/24) - datetime.timedelta(4.5/24)
            #dt_local = dt_utc.astimezone(pytz.timezone('America/Toronto')) - datetime.timedelta(4.5/24)
        return dt_local

    def get_fetchdate(self):
@@ -135,13 +199,15 @@ class MPHKRecipe(BasicNewsRecipe):
        return self.get_dtlocal().strftime("%Y-%m-%d")

    def get_fetchday(self):
        # dt_utc = datetime.datetime.utcnow()
        # convert UTC to local hk time - at around HKT 6.00am, all news are available
        # dt_local = dt_utc - datetime.timedelta(-2.0/24)
        return self.get_dtlocal().strftime("%d")

    def get_cover_url(self):
        cover = 'http://news.mingpao.com/' + self.get_fetchdate() + '/' + self.get_fetchdate() + '_' + self.get_fetchday() + 'gacov.jpg'
        if __Region__ == 'Hong Kong':
            cover = 'http://news.mingpao.com/' + self.get_fetchdate() + '/' + self.get_fetchdate() + '_' + self.get_fetchday() + 'gacov.jpg'
        elif __Region__ == 'Vancouver':
            cover = 'http://www.mingpaovan.com/ftp/News/' + self.get_fetchdate() + '/' + self.get_fetchday() + 'pgva1s.jpg'
        elif __Region__ == 'Toronto':
            cover = 'http://www.mingpaotor.com/ftp/News/' + self.get_fetchdate() + '/' + self.get_fetchday() + 'pgtas.jpg'
        br = BasicNewsRecipe.get_browser()
        try:
            br.open(cover)
@@ -153,76 +219,104 @@ class MPHKRecipe(BasicNewsRecipe):
        feeds = []
        dateStr = self.get_fetchdate()

        if __UseLife__:
            for title, url, keystr in [(u'\u8981\u805e Headline', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalga', 'nal'),
                                       (u'\u6e2f\u805e Local', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgb', 'nal'),
                                       (u'\u6559\u80b2 Education', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgf', 'nal'),
                                       (u'\u793e\u8a55/\u7b46\u9663 Editorial', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalmr', 'nal'),
                                       (u'\u8ad6\u58c7 Forum', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalfa', 'nal'),
                                       (u'\u4e2d\u570b China', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalca', 'nal'),
                                       (u'\u570b\u969b World', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalta', 'nal'),
                                       (u'\u7d93\u6fdf Finance', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea', 'nal'),
                                       (u'\u9ad4\u80b2 Sport', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalsp', 'nal'),
                                       (u'\u5f71\u8996 Film/TV', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalma', 'nal'),
                                       (u'\u5c08\u6b04 Columns', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=ncolumn', 'ncl')]:
                articles = self.parse_section2(url, keystr)
        if __Region__ == 'Hong Kong':
            if __UseLife__:
                for title, url, keystr in [(u'\u8981\u805e Headline', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalga', 'nal'),
                                           (u'\u6e2f\u805e Local', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgb', 'nal'),
                                           (u'\u6559\u80b2 Education', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgf', 'nal'),
                                           (u'\u793e\u8a55/\u7b46\u9663 Editorial', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalmr', 'nal'),
                                           (u'\u8ad6\u58c7 Forum', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalfa', 'nal'),
                                           (u'\u4e2d\u570b China', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalca', 'nal'),
                                           (u'\u570b\u969b World', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalta', 'nal'),
                                           (u'\u7d93\u6fdf Finance', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea', 'nal'),
                                           (u'\u9ad4\u80b2 Sport', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalsp', 'nal'),
                                           (u'\u5f71\u8996 Film/TV', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalma', 'nal'),
                                           (u'\u5c08\u6b04 Columns', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=ncolumn', 'ncl')]:
                    articles = self.parse_section2(url, keystr)
                    if articles:
                        feeds.append((title, articles))

                for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                                   (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))
            else:
                for title, url in [(u'\u8981\u805e Headline', 'http://news.mingpao.com/' + dateStr + '/gaindex.htm'),
                                   (u'\u6e2f\u805e Local', 'http://news.mingpao.com/' + dateStr + '/gbindex.htm'),
                                   (u'\u6559\u80b2 Education', 'http://news.mingpao.com/' + dateStr + '/gfindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special- editorial
                ed_articles = self.parse_ed_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalmr')
                if ed_articles:
                    feeds.append((u'\u793e\u8a55/\u7b46\u9663 Editorial', ed_articles))

                for title, url in [(u'\u8ad6\u58c7 Forum', 'http://news.mingpao.com/' + dateStr + '/faindex.htm'),
                                   (u'\u4e2d\u570b China', 'http://news.mingpao.com/' + dateStr + '/caindex.htm'),
                                   (u'\u570b\u969b World', 'http://news.mingpao.com/' + dateStr + '/taindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - finance
                #fin_articles = self.parse_fin_section('http://www.mpfinance.com/htm/Finance/' + dateStr + '/News/ea,eb,ecindex.htm')
                fin_articles = self.parse_fin_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea')
                if fin_articles:
                    feeds.append((u'\u7d93\u6fdf Finance', fin_articles))

                for title, url in [('Tech News', 'http://news.mingpao.com/' + dateStr + '/naindex.htm'),
                                   (u'\u9ad4\u80b2 Sport', 'http://news.mingpao.com/' + dateStr + '/spindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - entertainment
                ent_articles = self.parse_ent_section('http://ol.mingpao.com/cfm/star1.cfm')
                if ent_articles:
                    feeds.append((u'\u5f71\u8996 Film/TV', ent_articles))

                for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                                   (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))


                # special- columns
                col_articles = self.parse_col_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=ncolumn')
                if col_articles:
                    feeds.append((u'\u5c08\u6b04 Columns', col_articles))
        elif __Region__ == 'Vancouver':
            for title, url in [(u'\u8981\u805e Headline', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VAindex.htm'),
                               (u'\u52a0\u570b Canada', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VBindex.htm'),
                               (u'\u793e\u5340 Local', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VDindex.htm'),
                               (u'\u6e2f\u805e Hong Kong', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/HK-VGindex.htm'),
                               (u'\u570b\u969b World', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VTindex.htm'),
                               (u'\u4e2d\u570b China', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VCindex.htm'),
                               (u'\u7d93\u6fdf Economics', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VEindex.htm'),
                               (u'\u9ad4\u80b2 Sports', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VSindex.htm'),
                               (u'\u5f71\u8996 Film/TV', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/HK-MAindex.htm'),
                               (u'\u526f\u520a Supplements', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/WWindex.htm'),]:
                articles = self.parse_section3(url, 'http://www.mingpaovan.com/')
                if articles:
                    feeds.append((title, articles))

            for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                               (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                articles = self.parse_section(url)
        elif __Region__ == 'Toronto':
            for title, url in [(u'\u8981\u805e Headline', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TAindex.htm'),
                               (u'\u52a0\u570b Canada', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TDindex.htm'),
                               (u'\u793e\u5340 Local', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TFindex.htm'),
                               (u'\u4e2d\u570b China', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TCAindex.htm'),
                               (u'\u570b\u969b World', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TTAindex.htm'),
                               (u'\u6e2f\u805e Hong Kong', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/HK-GAindex.htm'),
                               (u'\u7d93\u6fdf Economics', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/THindex.htm'),
                               (u'\u9ad4\u80b2 Sports', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TSindex.htm'),
                               (u'\u5f71\u8996 Film/TV', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/HK-MAindex.htm'),
                               (u'\u526f\u520a Supplements', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/WWindex.htm'),]:
                articles = self.parse_section3(url, 'http://www.mingpaotor.com/')
                if articles:
                    feeds.append((title, articles))
        else:
            for title, url in [(u'\u8981\u805e Headline', 'http://news.mingpao.com/' + dateStr + '/gaindex.htm'),
                               (u'\u6e2f\u805e Local', 'http://news.mingpao.com/' + dateStr + '/gbindex.htm'),
                               (u'\u6559\u80b2 Education', 'http://news.mingpao.com/' + dateStr + '/gfindex.htm')]:
                articles = self.parse_section(url)
                if articles:
                    feeds.append((title, articles))

            # special- editorial
            ed_articles = self.parse_ed_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalmr')
            if ed_articles:
                feeds.append((u'\u793e\u8a55/\u7b46\u9663 Editorial', ed_articles))

            for title, url in [(u'\u8ad6\u58c7 Forum', 'http://news.mingpao.com/' + dateStr + '/faindex.htm'),
                               (u'\u4e2d\u570b China', 'http://news.mingpao.com/' + dateStr + '/caindex.htm'),
                               (u'\u570b\u969b World', 'http://news.mingpao.com/' + dateStr + '/taindex.htm')]:
                articles = self.parse_section(url)
                if articles:
                    feeds.append((title, articles))

            # special - finance
            #fin_articles = self.parse_fin_section('http://www.mpfinance.com/htm/Finance/' + dateStr + '/News/ea,eb,ecindex.htm')
            fin_articles = self.parse_fin_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea')
            if fin_articles:
                feeds.append((u'\u7d93\u6fdf Finance', fin_articles))

            for title, url in [('Tech News', 'http://news.mingpao.com/' + dateStr + '/naindex.htm'),
                               (u'\u9ad4\u80b2 Sport', 'http://news.mingpao.com/' + dateStr + '/spindex.htm')]:
                articles = self.parse_section(url)
                if articles:
                    feeds.append((title, articles))

            # special - entertainment
            ent_articles = self.parse_ent_section('http://ol.mingpao.com/cfm/star1.cfm')
            if ent_articles:
                feeds.append((u'\u5f71\u8996 Film/TV', ent_articles))

            for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                               (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                articles = self.parse_section(url)
                if articles:
                    feeds.append((title, articles))


            # special- columns
            col_articles = self.parse_col_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=ncolumn')
            if col_articles:
                feeds.append((u'\u5c08\u6b04 Columns', col_articles))

        return feeds

    # parse from news.mingpao.com
@@ -256,11 +350,30 @@ class MPHKRecipe(BasicNewsRecipe):
            title = self.tag_to_string(i)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind(keystr) == -1):
                url = url.replace('dailynews3.cfm', 'dailynews3a.cfm') # use printed version of the article
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    # parse from www.mingpaovan.com
    def parse_section3(self, url, baseUrl):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        divs = soup.findAll(attrs={'class': ['ListContentLargeLink']})
        current_articles = []
        included_urls = []
        divs.reverse()
        for i in divs:
            title = self.tag_to_string(i)
            urlstr = i.get('href', False)
            urlstr = baseUrl + '/' + urlstr.replace('../../../', '')
            if urlstr not in included_urls:
                current_articles.append({'title': title, 'url': urlstr, 'description': '', 'date': ''})
                included_urls.append(urlstr)
        current_articles.reverse()
        return current_articles

    def parse_ed_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
@@ -338,7 +451,12 @@ class MPHKRecipe(BasicNewsRecipe):
        if dir is None:
            dir = self.output_dir
        if __UseChineseTitle__ == True:
            title = u'\u660e\u5831 (\u9999\u6e2f)'
            if __Region__ == 'Hong Kong':
                title = u'\u660e\u5831 (\u9999\u6e2f)'
            elif __Region__ == 'Vancouver':
                title = u'\u660e\u5831 (\u6eab\u54e5\u83ef)'
            elif __Region__ == 'Toronto':
                title = u'\u660e\u5831 (\u591a\u502b\u591a)'
        else:
            title = self.short_title()
        # if not generating a periodical, force date to apply in title
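Note on the get_dtlocal() rewrite above: it approximates each region's local time with a fixed UTC offset (+8 for Hong Kong, -8 for Vancouver, -5 for Toronto) and then subtracts 4.5 hours, so that before 4:30am local time the previous day's issue is still fetched (per the recipe's comments, all of a day's articles are only available by 4:30am). A compact sketch of the same arithmetic, using fixed offsets exactly as the recipe does, which means DST is ignored for the Canadian editions just as the recipe's -8 and -5 constants ignore it:

import datetime

OFFSETS = {'Hong Kong': 8.0, 'Vancouver': -8.0, 'Toronto': -5.0}  # hours from UTC

def fetch_date(region):
    dt_utc = datetime.datetime.utcnow()
    # timedelta's first argument is days, so 8.0/24 days == 8 hours;
    # shifting back a further 4.5h makes times before 4:30am local
    # resolve to yesterday's date.
    dt_local = dt_utc + datetime.timedelta(OFFSETS[region] / 24) - datetime.timedelta(4.5 / 24)
    return dt_local.strftime('%Y%m%d')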
recipes/ming_pao_toronto.recipe (new file, 594 lines)
@@ -0,0 +1,594 @@
__license__ = 'GPL v3'
__copyright__ = '2010-2011, Eddie Lau'

# Region - Hong Kong, Vancouver, Toronto
__Region__ = 'Toronto'
# Users of Kindle 3 with limited system-level CJK support
# please replace the following "True" with "False".
__MakePeriodical__ = True
# Turn below to true if your device supports display of CJK titles
__UseChineseTitle__ = False
# Set it to False if you want to skip images
__KeepImages__ = True
# (HK only) Turn below to true if you wish to use life.mingpao.com as the main article source
__UseLife__ = True


'''
Change Log:
2011/06/26: add fetching Vancouver and Toronto versions of the paper, also provide captions for images using life.mingpao fetch source
            provide options to remove all images in the file
2011/05/12: switch the main parse source to life.mingpao.com, which has more photos on the article pages
2011/03/06: add new articles for finance section, also a new section "Columns"
2011/02/28: rearrange the sections
            [Disabled until Kindle has better CJK support and can remember last (section,article) read in Sections & Articles
            View] make it the same title if generating a periodical, so past issue will be automatically put into "Past Issues"
            folder in Kindle 3
2011/02/20: skip duplicated links in finance section, put photos which may extend a whole page to the back of the articles
            clean up the indentation
2010/12/07: add entertainment section, use newspaper front page as ebook cover, suppress date display in section list
            (to avoid wrong date display in case the user generates the ebook in a time zone different from HKT)
2010/11/22: add English section, remove eco-news section which is not updated daily, correct
            ordering of articles
2010/11/12: add news image and eco-news section
2010/11/08: add parsing of finance section
2010/11/06: temporary work-around for Kindle device having no capability to display unicode
            in section/article list.
2010/10/31: skip repeated articles in section pages
'''

import os, datetime, re
from calibre.web.feeds.recipes import BasicNewsRecipe
from contextlib import nested
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.metadata import MetaInformation

# MAIN CLASS
class MPRecipe(BasicNewsRecipe):
    if __Region__ == 'Hong Kong':
        title = 'Ming Pao - Hong Kong'
        description = 'Hong Kong Chinese Newspaper (http://news.mingpao.com)'
        category = 'Chinese, News, Hong Kong'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} font>b {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://news.mingpao.com/image/portals_top_logo_news.gif'
        keep_only_tags = [dict(name='h1'),
                          dict(name='font', attrs={'style':['font-size:14pt; line-height:160%;']}), # for entertainment page title
                          dict(name='font', attrs={'color':['AA0000']}), # for column articles title
                          dict(attrs={'id':['newscontent']}), # entertainment and column page content
                          dict(attrs={'id':['newscontent01','newscontent02']}),
                          dict(attrs={'class':['photo']}),
                          dict(name='table', attrs={'width':['100%'], 'border':['0'], 'cellspacing':['5'], 'cellpadding':['0']}), # content in printed version of life.mingpao.com
                          dict(name='img', attrs={'width':['180'], 'alt':['按圖放大']}) # images for source from life.mingpao.com
                         ]
        if __KeepImages__:
            remove_tags = [dict(name='style'),
                           dict(attrs={'id':['newscontent135']}), # for the finance page from mpfinance.com
                           dict(name='font', attrs={'size':['2'], 'color':['666666']}), # article date in life.mingpao.com article
                           #dict(name='table') # for content fetched from life.mingpao.com
                          ]
        else:
            remove_tags = [dict(name='style'),
                           dict(attrs={'id':['newscontent135']}), # for the finance page from mpfinance.com
                           dict(name='font', attrs={'size':['2'], 'color':['666666']}), # article date in life.mingpao.com article
                           dict(name='img'),
                           #dict(name='table') # for content fetched from life.mingpao.com
                          ]
        remove_attributes = ['width']
        preprocess_regexps = [
                              (re.compile(r'<h5>', re.DOTALL|re.IGNORECASE),
                               lambda match: '<h1>'),
                              (re.compile(r'</h5>', re.DOTALL|re.IGNORECASE),
                               lambda match: '</h1>'),
                              (re.compile(r'<p><a href=.+?</a></p>', re.DOTALL|re.IGNORECASE), # for entertainment page
                               lambda match: ''),
                              # skip <br> after title in life.mingpao.com fetched article
                              (re.compile(r"<div id='newscontent'><br>", re.DOTALL|re.IGNORECASE),
                               lambda match: "<div id='newscontent'>"),
                              (re.compile(r"<br><br></b>", re.DOTALL|re.IGNORECASE),
                               lambda match: "</b>")
                             ]
    elif __Region__ == 'Vancouver':
        title = 'Ming Pao - Vancouver'
        description = 'Vancouver Chinese Newspaper (http://www.mingpaovan.com)'
        category = 'Chinese, News, Vancouver'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} b>font {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://www.mingpaovan.com/image/mainlogo2_VAN2.gif'
        keep_only_tags = [dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['1']}),
                          dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['3'], 'cellpadding':['3'], 'id':['tblContent3']}),
                          dict(name='table', attrs={'width':['180'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['0'], 'bgcolor':['F0F0F0']}),
                         ]
        if __KeepImages__:
            remove_tags = [dict(name='img', attrs={'src':['../../../image/magnifier.gif']})] # the magnifier icon
        else:
            remove_tags = [dict(name='img')]
        remove_attributes = ['width']
        preprocess_regexps = [(re.compile(r'&nbsp;', re.DOTALL|re.IGNORECASE),
                               lambda match: ''),
                             ]
    elif __Region__ == 'Toronto':
        title = 'Ming Pao - Toronto'
        description = 'Toronto Chinese Newspaper (http://www.mingpaotor.com)'
        category = 'Chinese, News, Toronto'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} b>font {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://www.mingpaotor.com/image/mainlogo2_TOR2.gif'
        keep_only_tags = [dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['1']}),
                          dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['3'], 'cellpadding':['3'], 'id':['tblContent3']}),
                          dict(name='table', attrs={'width':['180'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['0'], 'bgcolor':['F0F0F0']}),
                         ]
        if __KeepImages__:
            remove_tags = [dict(name='img', attrs={'src':['../../../image/magnifier.gif']})] # the magnifier icon
        else:
            remove_tags = [dict(name='img')]
        remove_attributes = ['width']
        preprocess_regexps = [(re.compile(r'&nbsp;', re.DOTALL|re.IGNORECASE),
                               lambda match: ''),
                             ]

    oldest_article = 1
    max_articles_per_feed = 100
    __author__ = 'Eddie Lau'
    publisher = 'MingPao'
    remove_javascript = True
    use_embedded_content = False
    no_stylesheets = True
    language = 'zh'
    encoding = 'Big5-HKSCS'
    recursions = 0
    conversion_options = {'linearize_tables':True}
    timefmt = ''

    def image_url_processor(cls, baseurl, url):
        # trick: break the url at the first occurance of digit, add an additional
        # '_' at the front
        # not working, may need to move this to preprocess_html() method
        # minIdx = 10000
        # i0 = url.find('0')
        # if i0 >= 0 and i0 < minIdx:
        #     minIdx = i0
        # i1 = url.find('1')
        # if i1 >= 0 and i1 < minIdx:
        #     minIdx = i1
        # i2 = url.find('2')
        # if i2 >= 0 and i2 < minIdx:
        #     minIdx = i2
        # i3 = url.find('3')
        # if i3 >= 0 and i0 < minIdx:
        #     minIdx = i3
        # i4 = url.find('4')
        # if i4 >= 0 and i4 < minIdx:
        #     minIdx = i4
        # i5 = url.find('5')
        # if i5 >= 0 and i5 < minIdx:
        #     minIdx = i5
        # i6 = url.find('6')
        # if i6 >= 0 and i6 < minIdx:
        #     minIdx = i6
        # i7 = url.find('7')
        # if i7 >= 0 and i7 < minIdx:
        #     minIdx = i7
        # i8 = url.find('8')
        # if i8 >= 0 and i8 < minIdx:
        #     minIdx = i8
        # i9 = url.find('9')
        # if i9 >= 0 and i9 < minIdx:
        #     minIdx = i9
        return url

    def get_dtlocal(self):
        dt_utc = datetime.datetime.utcnow()
        if __Region__ == 'Hong Kong':
            # convert UTC to local hk time - at HKT 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(8.0/24) - datetime.timedelta(4.5/24)
            # dt_local = dt_utc.astimezone(pytz.timezone('Asia/Hong_Kong')) - datetime.timedelta(4.5/24)
        elif __Region__ == 'Vancouver':
            # convert UTC to local Vancouver time - at PST time 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(-8.0/24) - datetime.timedelta(4.5/24)
            #dt_local = dt_utc.astimezone(pytz.timezone('America/Vancouver')) - datetime.timedelta(4.5/24)
        elif __Region__ == 'Toronto':
            # convert UTC to local Toronto time - at EST time 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(-5.0/24) - datetime.timedelta(4.5/24)
            #dt_local = dt_utc.astimezone(pytz.timezone('America/Toronto')) - datetime.timedelta(4.5/24)
        return dt_local

    def get_fetchdate(self):
        return self.get_dtlocal().strftime("%Y%m%d")

    def get_fetchformatteddate(self):
        return self.get_dtlocal().strftime("%Y-%m-%d")

    def get_fetchday(self):
        return self.get_dtlocal().strftime("%d")

    def get_cover_url(self):
        if __Region__ == 'Hong Kong':
            cover = 'http://news.mingpao.com/' + self.get_fetchdate() + '/' + self.get_fetchdate() + '_' + self.get_fetchday() + 'gacov.jpg'
        elif __Region__ == 'Vancouver':
            cover = 'http://www.mingpaovan.com/ftp/News/' + self.get_fetchdate() + '/' + self.get_fetchday() + 'pgva1s.jpg'
        elif __Region__ == 'Toronto':
            cover = 'http://www.mingpaotor.com/ftp/News/' + self.get_fetchdate() + '/' + self.get_fetchday() + 'pgtas.jpg'
        br = BasicNewsRecipe.get_browser()
        try:
            br.open(cover)
        except:
            cover = None
        return cover

    def parse_index(self):
        feeds = []
        dateStr = self.get_fetchdate()

        if __Region__ == 'Hong Kong':
            if __UseLife__:
                for title, url, keystr in [(u'\u8981\u805e Headline', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalga', 'nal'),
                                           (u'\u6e2f\u805e Local', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgb', 'nal'),
                                           (u'\u6559\u80b2 Education', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgf', 'nal'),
                                           (u'\u793e\u8a55/\u7b46\u9663 Editorial', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalmr', 'nal'),
                                           (u'\u8ad6\u58c7 Forum', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalfa', 'nal'),
                                           (u'\u4e2d\u570b China', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalca', 'nal'),
                                           (u'\u570b\u969b World', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalta', 'nal'),
                                           (u'\u7d93\u6fdf Finance', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea', 'nal'),
                                           (u'\u9ad4\u80b2 Sport', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalsp', 'nal'),
                                           (u'\u5f71\u8996 Film/TV', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalma', 'nal'),
                                           (u'\u5c08\u6b04 Columns', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=ncolumn', 'ncl')]:
                    articles = self.parse_section2(url, keystr)
                    if articles:
                        feeds.append((title, articles))

                for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                                   (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))
            else:
                for title, url in [(u'\u8981\u805e Headline', 'http://news.mingpao.com/' + dateStr + '/gaindex.htm'),
                                   (u'\u6e2f\u805e Local', 'http://news.mingpao.com/' + dateStr + '/gbindex.htm'),
                                   (u'\u6559\u80b2 Education', 'http://news.mingpao.com/' + dateStr + '/gfindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special- editorial
                ed_articles = self.parse_ed_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=nalmr')
                if ed_articles:
                    feeds.append((u'\u793e\u8a55/\u7b46\u9663 Editorial', ed_articles))

                for title, url in [(u'\u8ad6\u58c7 Forum', 'http://news.mingpao.com/' + dateStr + '/faindex.htm'),
                                   (u'\u4e2d\u570b China', 'http://news.mingpao.com/' + dateStr + '/caindex.htm'),
                                   (u'\u570b\u969b World', 'http://news.mingpao.com/' + dateStr + '/taindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - finance
                #fin_articles = self.parse_fin_section('http://www.mpfinance.com/htm/Finance/' + dateStr + '/News/ea,eb,ecindex.htm')
                fin_articles = self.parse_fin_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea')
                if fin_articles:
                    feeds.append((u'\u7d93\u6fdf Finance', fin_articles))

                for title, url in [('Tech News', 'http://news.mingpao.com/' + dateStr + '/naindex.htm'),
                                   (u'\u9ad4\u80b2 Sport', 'http://news.mingpao.com/' + dateStr + '/spindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - entertainment
                ent_articles = self.parse_ent_section('http://ol.mingpao.com/cfm/star1.cfm')
                if ent_articles:
                    feeds.append((u'\u5f71\u8996 Film/TV', ent_articles))

                for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                                   (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))


                # special- columns
                col_articles = self.parse_col_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr +'&Category=ncolumn')
                if col_articles:
                    feeds.append((u'\u5c08\u6b04 Columns', col_articles))
        elif __Region__ == 'Vancouver':
            for title, url in [(u'\u8981\u805e Headline', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VAindex.htm'),
                               (u'\u52a0\u570b Canada', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VBindex.htm'),
                               (u'\u793e\u5340 Local', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VDindex.htm'),
                               (u'\u6e2f\u805e Hong Kong', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/HK-VGindex.htm'),
                               (u'\u570b\u969b World', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VTindex.htm'),
                               (u'\u4e2d\u570b China', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VCindex.htm'),
                               (u'\u7d93\u6fdf Economics', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VEindex.htm'),
                               (u'\u9ad4\u80b2 Sports', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VSindex.htm'),
                               (u'\u5f71\u8996 Film/TV', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/HK-MAindex.htm'),
                               (u'\u526f\u520a Supplements', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/WWindex.htm'),]:
                articles = self.parse_section3(url, 'http://www.mingpaovan.com/')
                if articles:
                    feeds.append((title, articles))
        elif __Region__ == 'Toronto':
            for title, url in [(u'\u8981\u805e Headline', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TAindex.htm'),
                               (u'\u52a0\u570b Canada', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TDindex.htm'),
                               (u'\u793e\u5340 Local', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TFindex.htm'),
                               (u'\u4e2d\u570b China', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TCAindex.htm'),
                               (u'\u570b\u969b World', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TTAindex.htm'),
                               (u'\u6e2f\u805e Hong Kong', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/HK-GAindex.htm'),
                               (u'\u7d93\u6fdf Economics', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/THindex.htm'),
                               (u'\u9ad4\u80b2 Sports', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TSindex.htm'),
                               (u'\u5f71\u8996 Film/TV', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/HK-MAindex.htm'),
                               (u'\u526f\u520a Supplements', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/WWindex.htm'),]:
                articles = self.parse_section3(url, 'http://www.mingpaotor.com/')
                if articles:
                    feeds.append((title, articles))
        return feeds

    # parse from news.mingpao.com
    def parse_section(self, url):
        dateStr = self.get_fetchdate()
        soup = self.index_to_soup(url)
        divs = soup.findAll(attrs={'class': ['bullet','bullet_grey']})
        current_articles = []
        included_urls = []
        divs.reverse()
        for i in divs:
            a = i.find('a', href = True)
            title = self.tag_to_string(a)
            url = a.get('href', False)
            url = 'http://news.mingpao.com/' + dateStr + '/' +url
            if url not in included_urls and url.rfind('Redirect') == -1:
                current_articles.append({'title': title, 'url': url, 'description':'', 'date':''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    # parse from life.mingpao.com
    def parse_section2(self, url, keystr):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        a.reverse()
        current_articles = []
        included_urls = []
        for i in a:
            title = self.tag_to_string(i)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind(keystr) == -1):
                url = url.replace('dailynews3.cfm', 'dailynews3a.cfm') # use printed version of the article
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    # parse from www.mingpaovan.com
    def parse_section3(self, url, baseUrl):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        divs = soup.findAll(attrs={'class': ['ListContentLargeLink']})
        current_articles = []
        included_urls = []
        divs.reverse()
        for i in divs:
            title = self.tag_to_string(i)
            urlstr = i.get('href', False)
            urlstr = baseUrl + '/' + urlstr.replace('../../../', '')
            if urlstr not in included_urls:
                current_articles.append({'title': title, 'url': urlstr, 'description': '', 'date': ''})
                included_urls.append(urlstr)
        current_articles.reverse()
        return current_articles

    def parse_ed_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        a.reverse()
        current_articles = []
        included_urls = []
        for i in a:
            title = self.tag_to_string(i)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind('nal') == -1):
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    def parse_fin_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href= True)
        current_articles = []
        included_urls = []
        for i in a:
            #url = 'http://www.mpfinance.com/cfm/' + i.get('href', False)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            #if url not in included_urls and not url.rfind(dateStr) == -1 and url.rfind('index') == -1:
            if url not in included_urls and (not url.rfind('txt') == -1) and (not url.rfind('nal') == -1):
                title = self.tag_to_string(i)
                current_articles.append({'title': title, 'url': url, 'description':''})
                included_urls.append(url)
        return current_articles

    def parse_ent_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        a.reverse()
        current_articles = []
        included_urls = []
        for i in a:
            title = self.tag_to_string(i)
            url = 'http://ol.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind('star') == -1):
|
||||
current_articles.append({'title': title, 'url': url, 'description': ''})
|
||||
included_urls.append(url)
|
||||
current_articles.reverse()
|
||||
return current_articles
|
||||
|
||||
def parse_col_section(self, url):
|
||||
self.get_fetchdate()
|
||||
soup = self.index_to_soup(url)
|
||||
a = soup.findAll('a', href=True)
|
||||
a.reverse()
|
||||
current_articles = []
|
||||
included_urls = []
|
||||
for i in a:
|
||||
title = self.tag_to_string(i)
|
||||
url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
|
||||
if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind('ncl') == -1):
|
||||
current_articles.append({'title': title, 'url': url, 'description': ''})
|
||||
included_urls.append(url)
|
||||
current_articles.reverse()
|
||||
return current_articles
|
||||
|
||||
def preprocess_html(self, soup):
|
||||
for item in soup.findAll(style=True):
|
||||
del item['style']
|
||||
for item in soup.findAll(style=True):
|
||||
del item['width']
|
||||
for item in soup.findAll(stype=True):
|
||||
del item['absmiddle']
|
||||
return soup
|
||||
|
||||
def create_opf(self, feeds, dir=None):
|
||||
if dir is None:
|
||||
dir = self.output_dir
|
||||
if __UseChineseTitle__ == True:
|
||||
if __Region__ == 'Hong Kong':
|
||||
title = u'\u660e\u5831 (\u9999\u6e2f)'
|
||||
elif __Region__ == 'Vancouver':
|
||||
title = u'\u660e\u5831 (\u6eab\u54e5\u83ef)'
|
||||
elif __Region__ == 'Toronto':
|
||||
title = u'\u660e\u5831 (\u591a\u502b\u591a)'
|
||||
else:
|
||||
title = self.short_title()
|
||||
# if not generating a periodical, force date to apply in title
|
||||
if __MakePeriodical__ == False:
|
||||
title = title + ' ' + self.get_fetchformatteddate()
|
||||
if True:
|
||||
mi = MetaInformation(title, [self.publisher])
|
||||
mi.publisher = self.publisher
|
||||
mi.author_sort = self.publisher
|
||||
if __MakePeriodical__ == True:
|
||||
mi.publication_type = 'periodical:'+self.publication_type+':'+self.short_title()
|
||||
else:
|
||||
mi.publication_type = self.publication_type+':'+self.short_title()
|
||||
#mi.timestamp = nowf()
|
||||
mi.timestamp = self.get_dtlocal()
|
||||
mi.comments = self.description
|
||||
if not isinstance(mi.comments, unicode):
|
||||
mi.comments = mi.comments.decode('utf-8', 'replace')
|
||||
#mi.pubdate = nowf()
|
||||
mi.pubdate = self.get_dtlocal()
|
||||
opf_path = os.path.join(dir, 'index.opf')
|
||||
ncx_path = os.path.join(dir, 'index.ncx')
|
||||
opf = OPFCreator(dir, mi)
|
||||
# Add mastheadImage entry to <guide> section
|
||||
mp = getattr(self, 'masthead_path', None)
|
||||
if mp is not None and os.access(mp, os.R_OK):
|
||||
from calibre.ebooks.metadata.opf2 import Guide
|
||||
ref = Guide.Reference(os.path.basename(self.masthead_path), os.getcwdu())
|
||||
ref.type = 'masthead'
|
||||
ref.title = 'Masthead Image'
|
||||
opf.guide.append(ref)
|
||||
|
||||
manifest = [os.path.join(dir, 'feed_%d'%i) for i in range(len(feeds))]
|
||||
manifest.append(os.path.join(dir, 'index.html'))
|
||||
manifest.append(os.path.join(dir, 'index.ncx'))
|
||||
|
||||
# Get cover
|
||||
cpath = getattr(self, 'cover_path', None)
|
||||
if cpath is None:
|
||||
pf = open(os.path.join(dir, 'cover.jpg'), 'wb')
|
||||
if self.default_cover(pf):
|
||||
cpath = pf.name
|
||||
if cpath is not None and os.access(cpath, os.R_OK):
|
||||
opf.cover = cpath
|
||||
manifest.append(cpath)
|
||||
|
||||
# Get masthead
|
||||
mpath = getattr(self, 'masthead_path', None)
|
||||
if mpath is not None and os.access(mpath, os.R_OK):
|
||||
manifest.append(mpath)
|
||||
|
||||
opf.create_manifest_from_files_in(manifest)
|
||||
for mani in opf.manifest:
|
||||
if mani.path.endswith('.ncx'):
|
||||
mani.id = 'ncx'
|
||||
if mani.path.endswith('mastheadImage.jpg'):
|
||||
mani.id = 'masthead-image'
|
||||
entries = ['index.html']
|
||||
toc = TOC(base_path=dir)
|
||||
self.play_order_counter = 0
|
||||
self.play_order_map = {}
|
||||
|
||||
def feed_index(num, parent):
|
||||
f = feeds[num]
|
||||
for j, a in enumerate(f):
|
||||
if getattr(a, 'downloaded', False):
|
||||
adir = 'feed_%d/article_%d/'%(num, j)
|
||||
auth = a.author
|
||||
if not auth:
|
||||
auth = None
|
||||
desc = a.text_summary
|
||||
if not desc:
|
||||
desc = None
|
||||
else:
|
||||
desc = self.description_limiter(desc)
|
||||
entries.append('%sindex.html'%adir)
|
||||
po = self.play_order_map.get(entries[-1], None)
|
||||
if po is None:
|
||||
self.play_order_counter += 1
|
||||
po = self.play_order_counter
|
||||
parent.add_item('%sindex.html'%adir, None, a.title if a.title else _('Untitled Article'),
|
||||
play_order=po, author=auth, description=desc)
|
||||
last = os.path.join(self.output_dir, ('%sindex.html'%adir).replace('/', os.sep))
|
||||
for sp in a.sub_pages:
|
||||
prefix = os.path.commonprefix([opf_path, sp])
|
||||
relp = sp[len(prefix):]
|
||||
entries.append(relp.replace(os.sep, '/'))
|
||||
last = sp
|
||||
|
||||
if os.path.exists(last):
|
||||
with open(last, 'rb') as fi:
|
||||
src = fi.read().decode('utf-8')
|
||||
soup = BeautifulSoup(src)
|
||||
body = soup.find('body')
|
||||
if body is not None:
|
||||
prefix = '/'.join('..'for i in range(2*len(re.findall(r'link\d+', last))))
|
||||
templ = self.navbar.generate(True, num, j, len(f),
|
||||
not self.has_single_feed,
|
||||
a.orig_url, self.publisher, prefix=prefix,
|
||||
center=self.center_navbar)
|
||||
elem = BeautifulSoup(templ.render(doctype='xhtml').decode('utf-8')).find('div')
|
||||
body.insert(len(body.contents), elem)
|
||||
with open(last, 'wb') as fi:
|
||||
fi.write(unicode(soup).encode('utf-8'))
|
||||
if len(feeds) == 0:
|
||||
raise Exception('All feeds are empty, aborting.')
|
||||
|
||||
if len(feeds) > 1:
|
||||
for i, f in enumerate(feeds):
|
||||
entries.append('feed_%d/index.html'%i)
|
||||
po = self.play_order_map.get(entries[-1], None)
|
||||
if po is None:
|
||||
self.play_order_counter += 1
|
||||
po = self.play_order_counter
|
||||
auth = getattr(f, 'author', None)
|
||||
if not auth:
|
||||
auth = None
|
||||
desc = getattr(f, 'description', None)
|
||||
if not desc:
|
||||
desc = None
|
||||
feed_index(i, toc.add_item('feed_%d/index.html'%i, None,
|
||||
f.title, play_order=po, description=desc, author=auth))
|
||||
|
||||
else:
|
||||
entries.append('feed_%d/index.html'%0)
|
||||
feed_index(0, toc)
|
||||
|
||||
for i, p in enumerate(entries):
|
||||
entries[i] = os.path.join(dir, p.replace('/', os.sep))
|
||||
opf.create_spine(entries)
|
||||
opf.set_toc(toc)
|
||||
|
||||
with nested(open(opf_path, 'wb'), open(ncx_path, 'wb')) as (opf_file, ncx_file):
|
||||
opf.render(opf_file, ncx_file)
|
||||
recipes/ming_pao_vancouver.recipe (new file, 594 lines)
@ -0,0 +1,594 @@
__license__ = 'GPL v3'
__copyright__ = '2010-2011, Eddie Lau'

# Region - Hong Kong, Vancouver, Toronto
__Region__ = 'Vancouver'
# Users of Kindle 3 with limited system-level CJK support
# should replace the following "True" with "False".
__MakePeriodical__ = True
# Set below to True if your device supports display of CJK titles
__UseChineseTitle__ = False
# Set it to False if you want to skip images
__KeepImages__ = True
# (HK only) Set below to True to use life.mingpao.com as the main article source
__UseLife__ = True

'''
Change Log:
2011/06/26: add fetching of the Vancouver and Toronto versions of the paper; also provide captions for images using the life.mingpao fetch source,
            and provide an option to remove all images in the file
2011/05/12: switch the main parse source to life.mingpao.com, which has more photos on the article pages
2011/03/06: add new articles for the finance section, also a new section "Columns"
2011/02/28: rearrange the sections
            [Disabled until Kindle has better CJK support and can remember the last (section, article) read in Sections & Articles
            View] make the title the same if generating a periodical, so past issues are automatically put into the "Past Issues"
            folder on Kindle 3
2011/02/20: skip duplicated links in the finance section, put photos which may extend over a whole page at the back of the articles,
            clean up the indentation
2010/12/07: add entertainment section, use the newspaper front page as the ebook cover, suppress date display in the section list
            (to avoid wrong date display in case the user generates the ebook in a time zone different from HKT)
2010/11/22: add English section, remove the eco-news section which is not updated daily, correct
            the ordering of articles
2010/11/12: add news image and eco-news section
2010/11/08: add parsing of the finance section
2010/11/06: temporary work-around for the Kindle device having no capability to display unicode
            in the section/article list.
2010/10/31: skip repeated articles in section pages
'''

import os, datetime, re
from calibre.web.feeds.recipes import BasicNewsRecipe
from contextlib import nested
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.metadata import MetaInformation

# MAIN CLASS
class MPRecipe(BasicNewsRecipe):
    if __Region__ == 'Hong Kong':
        title = 'Ming Pao - Hong Kong'
        description = 'Hong Kong Chinese Newspaper (http://news.mingpao.com)'
        category = 'Chinese, News, Hong Kong'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} font>b {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://news.mingpao.com/image/portals_top_logo_news.gif'
        keep_only_tags = [dict(name='h1'),
                          dict(name='font', attrs={'style':['font-size:14pt; line-height:160%;']}), # for entertainment page title
                          dict(name='font', attrs={'color':['AA0000']}), # for column articles title
                          dict(attrs={'id':['newscontent']}), # entertainment and column page content
                          dict(attrs={'id':['newscontent01','newscontent02']}),
                          dict(attrs={'class':['photo']}),
                          dict(name='table', attrs={'width':['100%'], 'border':['0'], 'cellspacing':['5'], 'cellpadding':['0']}), # content in printed version of life.mingpao.com
                          dict(name='img', attrs={'width':['180'], 'alt':['按圖放大']}) # images for source from life.mingpao.com
                          ]
        if __KeepImages__:
            remove_tags = [dict(name='style'),
                           dict(attrs={'id':['newscontent135']}), # for the finance page from mpfinance.com
                           dict(name='font', attrs={'size':['2'], 'color':['666666']}), # article date in life.mingpao.com article
                           #dict(name='table') # for content fetched from life.mingpao.com
                           ]
        else:
            remove_tags = [dict(name='style'),
                           dict(attrs={'id':['newscontent135']}), # for the finance page from mpfinance.com
                           dict(name='font', attrs={'size':['2'], 'color':['666666']}), # article date in life.mingpao.com article
                           dict(name='img'),
                           #dict(name='table') # for content fetched from life.mingpao.com
                           ]
        remove_attributes = ['width']
        preprocess_regexps = [
            (re.compile(r'<h5>', re.DOTALL|re.IGNORECASE),
             lambda match: '<h1>'),
            (re.compile(r'</h5>', re.DOTALL|re.IGNORECASE),
             lambda match: '</h1>'),
            (re.compile(r'<p><a href=.+?</a></p>', re.DOTALL|re.IGNORECASE), # for entertainment page
             lambda match: ''),
            # skip <br> after title in life.mingpao.com fetched article
            (re.compile(r"<div id='newscontent'><br>", re.DOTALL|re.IGNORECASE),
             lambda match: "<div id='newscontent'>"),
            (re.compile(r"<br><br></b>", re.DOTALL|re.IGNORECASE),
             lambda match: "</b>")
        ]
    elif __Region__ == 'Vancouver':
        title = 'Ming Pao - Vancouver'
        description = 'Vancouver Chinese Newspaper (http://www.mingpaovan.com)'
        category = 'Chinese, News, Vancouver'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} b>font {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://www.mingpaovan.com/image/mainlogo2_VAN2.gif'
        keep_only_tags = [dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['1']}),
                          dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['3'], 'cellpadding':['3'], 'id':['tblContent3']}),
                          dict(name='table', attrs={'width':['180'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['0'], 'bgcolor':['F0F0F0']}),
                          ]
        if __KeepImages__:
            remove_tags = [dict(name='img', attrs={'src':['../../../image/magnifier.gif']})] # the magnifier icon
        else:
            remove_tags = [dict(name='img')]
        remove_attributes = ['width']
        preprocess_regexps = [(re.compile(r'&nbsp;', re.DOTALL|re.IGNORECASE),
                               lambda match: ''),
                              ]
    elif __Region__ == 'Toronto':
        title = 'Ming Pao - Toronto'
        description = 'Toronto Chinese Newspaper (http://www.mingpaotor.com)'
        category = 'Chinese, News, Toronto'
        extra_css = 'img {display: block; margin-left: auto; margin-right: auto; margin-top: 10px; margin-bottom: 10px;} b>font {font-size:200%; font-weight:bold;}'
        masthead_url = 'http://www.mingpaotor.com/image/mainlogo2_TOR2.gif'
        keep_only_tags = [dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['1']}),
                          dict(name='table', attrs={'width':['450'], 'border':['0'], 'cellspacing':['3'], 'cellpadding':['3'], 'id':['tblContent3']}),
                          dict(name='table', attrs={'width':['180'], 'border':['0'], 'cellspacing':['0'], 'cellpadding':['0'], 'bgcolor':['F0F0F0']}),
                          ]
        if __KeepImages__:
            remove_tags = [dict(name='img', attrs={'src':['../../../image/magnifier.gif']})] # the magnifier icon
        else:
            remove_tags = [dict(name='img')]
        remove_attributes = ['width']
        preprocess_regexps = [(re.compile(r'&nbsp;', re.DOTALL|re.IGNORECASE),
                               lambda match: ''),
                              ]

    oldest_article = 1
    max_articles_per_feed = 100
    __author__ = 'Eddie Lau'
    publisher = 'MingPao'
    remove_javascript = True
    use_embedded_content = False
    no_stylesheets = True
    language = 'zh'
    encoding = 'Big5-HKSCS'
    recursions = 0
    conversion_options = {'linearize_tables':True}
    timefmt = ''

    def image_url_processor(cls, baseurl, url):
        # trick: break the url at the first occurrence of a digit, add an
        # additional '_' at the front
        # not working, may need to move this to the preprocess_html() method
        # digit_positions = [url.find(d) for d in '0123456789' if url.find(d) >= 0]
        # minIdx = min(digit_positions) if digit_positions else 10000
        return url

    def get_dtlocal(self):
        dt_utc = datetime.datetime.utcnow()
        if __Region__ == 'Hong Kong':
            # convert UTC to local hk time - at HKT 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(8.0/24) - datetime.timedelta(4.5/24)
            # dt_local = dt_utc.astimezone(pytz.timezone('Asia/Hong_Kong')) - datetime.timedelta(4.5/24)
        elif __Region__ == 'Vancouver':
            # convert UTC to local Vancouver time - at PST time 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(-8.0/24) - datetime.timedelta(4.5/24)
            #dt_local = dt_utc.astimezone(pytz.timezone('America/Vancouver')) - datetime.timedelta(4.5/24)
        elif __Region__ == 'Toronto':
            # convert UTC to local Toronto time - at EST time 4.30am, all news are available
            dt_local = dt_utc + datetime.timedelta(-5.0/24) - datetime.timedelta(4.5/24)
            #dt_local = dt_utc.astimezone(pytz.timezone('America/Toronto')) - datetime.timedelta(4.5/24)
        return dt_local
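
    # A hedged editorial sketch, not part of the original recipe: the fixed
    # offsets above ignore daylight saving time in Vancouver and Toronto.
    # If the pytz package were available (an assumption, hinted at by the
    # commented-out astimezone() calls above), a DST-aware variant could be:
    #
    #   import pytz
    #   def get_dtlocal_dst(self):
    #       zone = {'Hong Kong': 'Asia/Hong_Kong',
    #               'Vancouver': 'America/Vancouver',
    #               'Toronto': 'America/Toronto'}[__Region__]
    #       dt_utc = datetime.datetime.now(pytz.utc)
    #       # subtract 4.5 hours so the issue date rolls over at 4:30am local time
    #       return dt_utc.astimezone(pytz.timezone(zone)) - datetime.timedelta(hours=4, minutes=30)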

    def get_fetchdate(self):
        return self.get_dtlocal().strftime("%Y%m%d")

    def get_fetchformatteddate(self):
        return self.get_dtlocal().strftime("%Y-%m-%d")

    def get_fetchday(self):
        return self.get_dtlocal().strftime("%d")

    def get_cover_url(self):
        if __Region__ == 'Hong Kong':
            cover = 'http://news.mingpao.com/' + self.get_fetchdate() + '/' + self.get_fetchdate() + '_' + self.get_fetchday() + 'gacov.jpg'
        elif __Region__ == 'Vancouver':
            cover = 'http://www.mingpaovan.com/ftp/News/' + self.get_fetchdate() + '/' + self.get_fetchday() + 'pgva1s.jpg'
        elif __Region__ == 'Toronto':
            cover = 'http://www.mingpaotor.com/ftp/News/' + self.get_fetchdate() + '/' + self.get_fetchday() + 'pgtas.jpg'
        br = BasicNewsRecipe.get_browser()
        try:
            br.open(cover)
        except:
            cover = None
        return cover

    def parse_index(self):
        feeds = []
        dateStr = self.get_fetchdate()

        if __Region__ == 'Hong Kong':
            if __UseLife__:
                for title, url, keystr in [(u'\u8981\u805e Headline', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalga', 'nal'),
                                           (u'\u6e2f\u805e Local', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgb', 'nal'),
                                           (u'\u6559\u80b2 Education', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalgf', 'nal'),
                                           (u'\u793e\u8a55/\u7b46\u9663 Editorial', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalmr', 'nal'),
                                           (u'\u8ad6\u58c7 Forum', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalfa', 'nal'),
                                           (u'\u4e2d\u570b China', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalca', 'nal'),
                                           (u'\u570b\u969b World', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalta', 'nal'),
                                           (u'\u7d93\u6fdf Finance', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea', 'nal'),
                                           (u'\u9ad4\u80b2 Sport', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalsp', 'nal'),
                                           (u'\u5f71\u8996 Film/TV', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalma', 'nal'),
                                           (u'\u5c08\u6b04 Columns', 'http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=ncolumn', 'ncl')]:
                    articles = self.parse_section2(url, keystr)
                    if articles:
                        feeds.append((title, articles))

                for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                                   (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))
            else:
                for title, url in [(u'\u8981\u805e Headline', 'http://news.mingpao.com/' + dateStr + '/gaindex.htm'),
                                   (u'\u6e2f\u805e Local', 'http://news.mingpao.com/' + dateStr + '/gbindex.htm'),
                                   (u'\u6559\u80b2 Education', 'http://news.mingpao.com/' + dateStr + '/gfindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - editorial
                ed_articles = self.parse_ed_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalmr')
                if ed_articles:
                    feeds.append((u'\u793e\u8a55/\u7b46\u9663 Editorial', ed_articles))

                for title, url in [(u'\u8ad6\u58c7 Forum', 'http://news.mingpao.com/' + dateStr + '/faindex.htm'),
                                   (u'\u4e2d\u570b China', 'http://news.mingpao.com/' + dateStr + '/caindex.htm'),
                                   (u'\u570b\u969b World', 'http://news.mingpao.com/' + dateStr + '/taindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - finance
                #fin_articles = self.parse_fin_section('http://www.mpfinance.com/htm/Finance/' + dateStr + '/News/ea,eb,ecindex.htm')
                fin_articles = self.parse_fin_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=nalea')
                if fin_articles:
                    feeds.append((u'\u7d93\u6fdf Finance', fin_articles))

                for title, url in [('Tech News', 'http://news.mingpao.com/' + dateStr + '/naindex.htm'),
                                   (u'\u9ad4\u80b2 Sport', 'http://news.mingpao.com/' + dateStr + '/spindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - entertainment
                ent_articles = self.parse_ent_section('http://ol.mingpao.com/cfm/star1.cfm')
                if ent_articles:
                    feeds.append((u'\u5f71\u8996 Film/TV', ent_articles))

                for title, url in [(u'\u526f\u520a Supplement', 'http://news.mingpao.com/' + dateStr + '/jaindex.htm'),
                                   (u'\u82f1\u6587 English', 'http://news.mingpao.com/' + dateStr + '/emindex.htm')]:
                    articles = self.parse_section(url)
                    if articles:
                        feeds.append((title, articles))

                # special - columns
                col_articles = self.parse_col_section('http://life.mingpao.com/cfm/dailynews2.cfm?Issue=' + dateStr + '&Category=ncolumn')
                if col_articles:
                    feeds.append((u'\u5c08\u6b04 Columns', col_articles))
        elif __Region__ == 'Vancouver':
            for title, url in [(u'\u8981\u805e Headline', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VAindex.htm'),
                               (u'\u52a0\u570b Canada', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VBindex.htm'),
                               (u'\u793e\u5340 Local', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VDindex.htm'),
                               (u'\u6e2f\u805e Hong Kong', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/HK-VGindex.htm'),
                               (u'\u570b\u969b World', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VTindex.htm'),
                               (u'\u4e2d\u570b China', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VCindex.htm'),
                               (u'\u7d93\u6fdf Economics', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VEindex.htm'),
                               (u'\u9ad4\u80b2 Sports', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/VSindex.htm'),
                               (u'\u5f71\u8996 Film/TV', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/HK-MAindex.htm'),
                               (u'\u526f\u520a Supplements', 'http://www.mingpaovan.com/htm/News/' + dateStr + '/WWindex.htm'),]:
                articles = self.parse_section3(url, 'http://www.mingpaovan.com/')
                if articles:
                    feeds.append((title, articles))
        elif __Region__ == 'Toronto':
            for title, url in [(u'\u8981\u805e Headline', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TAindex.htm'),
                               (u'\u52a0\u570b Canada', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TDindex.htm'),
                               (u'\u793e\u5340 Local', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TFindex.htm'),
                               (u'\u4e2d\u570b China', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TCAindex.htm'),
                               (u'\u570b\u969b World', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TTAindex.htm'),
                               (u'\u6e2f\u805e Hong Kong', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/HK-GAindex.htm'),
                               (u'\u7d93\u6fdf Economics', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/THindex.htm'),
                               (u'\u9ad4\u80b2 Sports', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/TSindex.htm'),
                               (u'\u5f71\u8996 Film/TV', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/HK-MAindex.htm'),
                               (u'\u526f\u520a Supplements', 'http://www.mingpaotor.com/htm/News/' + dateStr + '/WWindex.htm'),]:
                articles = self.parse_section3(url, 'http://www.mingpaotor.com/')
                if articles:
                    feeds.append((title, articles))
        return feeds

    # parse from news.mingpao.com
    def parse_section(self, url):
        dateStr = self.get_fetchdate()
        soup = self.index_to_soup(url)
        divs = soup.findAll(attrs={'class': ['bullet', 'bullet_grey']})
        current_articles = []
        included_urls = []
        divs.reverse()
        for i in divs:
            a = i.find('a', href=True)
            title = self.tag_to_string(a)
            url = a.get('href', False)
            url = 'http://news.mingpao.com/' + dateStr + '/' + url
            if url not in included_urls and url.rfind('Redirect') == -1:
                current_articles.append({'title': title, 'url': url, 'description': '', 'date': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    # parse from life.mingpao.com
    def parse_section2(self, url, keystr):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        a.reverse()
        current_articles = []
        included_urls = []
        for i in a:
            title = self.tag_to_string(i)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind(keystr) == -1):
                url = url.replace('dailynews3.cfm', 'dailynews3a.cfm') # use printed version of the article
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles
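
    # Editorial note (a sketch, not original recipe code): the
    # `not url.rfind('.txt') == -1` tests in parse_section2() above are plain
    # substring checks; an equivalent, more readable form of the filter is:
    #
    #   if url not in included_urls and '.txt' in url and keystr in url: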

    # parse from www.mingpaovan.com
    def parse_section3(self, url, baseUrl):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        divs = soup.findAll(attrs={'class': ['ListContentLargeLink']})
        current_articles = []
        included_urls = []
        divs.reverse()
        for i in divs:
            title = self.tag_to_string(i)
            urlstr = i.get('href', False)
            urlstr = baseUrl + '/' + urlstr.replace('../../../', '')
            if urlstr not in included_urls:
                current_articles.append({'title': title, 'url': urlstr, 'description': '', 'date': ''})
                included_urls.append(urlstr)
        current_articles.reverse()
        return current_articles

    def parse_ed_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        a.reverse()
        current_articles = []
        included_urls = []
        for i in a:
            title = self.tag_to_string(i)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind('nal') == -1):
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    def parse_fin_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        current_articles = []
        included_urls = []
        for i in a:
            #url = 'http://www.mpfinance.com/cfm/' + i.get('href', False)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            #if url not in included_urls and not url.rfind(dateStr) == -1 and url.rfind('index') == -1:
            if url not in included_urls and (not url.rfind('txt') == -1) and (not url.rfind('nal') == -1):
                title = self.tag_to_string(i)
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        return current_articles

    def parse_ent_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        a.reverse()
        current_articles = []
        included_urls = []
        for i in a:
            title = self.tag_to_string(i)
            url = 'http://ol.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind('star') == -1):
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    def parse_col_section(self, url):
        self.get_fetchdate()
        soup = self.index_to_soup(url)
        a = soup.findAll('a', href=True)
        a.reverse()
        current_articles = []
        included_urls = []
        for i in a:
            title = self.tag_to_string(i)
            url = 'http://life.mingpao.com/cfm/' + i.get('href', False)
            if (url not in included_urls) and (not url.rfind('.txt') == -1) and (not url.rfind('ncl') == -1):
                current_articles.append({'title': title, 'url': url, 'description': ''})
                included_urls.append(url)
        current_articles.reverse()
        return current_articles

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll(width=True):
            del item['width']
        for item in soup.findAll(align='absmiddle'):
            del item['align']
        return soup

    def create_opf(self, feeds, dir=None):
        if dir is None:
            dir = self.output_dir
        if __UseChineseTitle__:
            if __Region__ == 'Hong Kong':
                title = u'\u660e\u5831 (\u9999\u6e2f)'
            elif __Region__ == 'Vancouver':
                title = u'\u660e\u5831 (\u6eab\u54e5\u83ef)'
            elif __Region__ == 'Toronto':
                title = u'\u660e\u5831 (\u591a\u502b\u591a)'
        else:
            title = self.short_title()
            # if not generating a periodical, force date to apply in title
            if not __MakePeriodical__:
                title = title + ' ' + self.get_fetchformatteddate()
        mi = MetaInformation(title, [self.publisher])
        mi.publisher = self.publisher
        mi.author_sort = self.publisher
        if __MakePeriodical__:
            mi.publication_type = 'periodical:' + self.publication_type + ':' + self.short_title()
        else:
            mi.publication_type = self.publication_type + ':' + self.short_title()
        #mi.timestamp = nowf()
        mi.timestamp = self.get_dtlocal()
        mi.comments = self.description
        if not isinstance(mi.comments, unicode):
            mi.comments = mi.comments.decode('utf-8', 'replace')
        #mi.pubdate = nowf()
        mi.pubdate = self.get_dtlocal()
        opf_path = os.path.join(dir, 'index.opf')
        ncx_path = os.path.join(dir, 'index.ncx')
        opf = OPFCreator(dir, mi)
        # Add mastheadImage entry to <guide> section
        mp = getattr(self, 'masthead_path', None)
        if mp is not None and os.access(mp, os.R_OK):
            from calibre.ebooks.metadata.opf2 import Guide
            ref = Guide.Reference(os.path.basename(self.masthead_path), os.getcwdu())
            ref.type = 'masthead'
            ref.title = 'Masthead Image'
            opf.guide.append(ref)

        manifest = [os.path.join(dir, 'feed_%d' % i) for i in range(len(feeds))]
        manifest.append(os.path.join(dir, 'index.html'))
        manifest.append(os.path.join(dir, 'index.ncx'))

        # Get cover
        cpath = getattr(self, 'cover_path', None)
        if cpath is None:
            pf = open(os.path.join(dir, 'cover.jpg'), 'wb')
            if self.default_cover(pf):
                cpath = pf.name
        if cpath is not None and os.access(cpath, os.R_OK):
            opf.cover = cpath
            manifest.append(cpath)

        # Get masthead
        mpath = getattr(self, 'masthead_path', None)
        if mpath is not None and os.access(mpath, os.R_OK):
            manifest.append(mpath)

        opf.create_manifest_from_files_in(manifest)
        for mani in opf.manifest:
            if mani.path.endswith('.ncx'):
                mani.id = 'ncx'
            if mani.path.endswith('mastheadImage.jpg'):
                mani.id = 'masthead-image'
        entries = ['index.html']
        toc = TOC(base_path=dir)
        self.play_order_counter = 0
        self.play_order_map = {}

        def feed_index(num, parent):
            f = feeds[num]
            for j, a in enumerate(f):
                if getattr(a, 'downloaded', False):
                    adir = 'feed_%d/article_%d/' % (num, j)
                    auth = a.author
                    if not auth:
                        auth = None
                    desc = a.text_summary
                    if not desc:
                        desc = None
                    else:
                        desc = self.description_limiter(desc)
                    entries.append('%sindex.html' % adir)
                    po = self.play_order_map.get(entries[-1], None)
                    if po is None:
                        self.play_order_counter += 1
                        po = self.play_order_counter
                    parent.add_item('%sindex.html' % adir, None, a.title if a.title else _('Untitled Article'),
                                    play_order=po, author=auth, description=desc)
                    last = os.path.join(self.output_dir, ('%sindex.html' % adir).replace('/', os.sep))
                    for sp in a.sub_pages:
                        prefix = os.path.commonprefix([opf_path, sp])
                        relp = sp[len(prefix):]
                        entries.append(relp.replace(os.sep, '/'))
                        last = sp

                    if os.path.exists(last):
                        with open(last, 'rb') as fi:
                            src = fi.read().decode('utf-8')
                        soup = BeautifulSoup(src)
                        body = soup.find('body')
                        if body is not None:
                            prefix = '/'.join('..' for i in range(2 * len(re.findall(r'link\d+', last))))
                            templ = self.navbar.generate(True, num, j, len(f),
                                                         not self.has_single_feed,
                                                         a.orig_url, self.publisher, prefix=prefix,
                                                         center=self.center_navbar)
                            elem = BeautifulSoup(templ.render(doctype='xhtml').decode('utf-8')).find('div')
                            body.insert(len(body.contents), elem)
                            with open(last, 'wb') as fi:
                                fi.write(unicode(soup).encode('utf-8'))
        if len(feeds) == 0:
            raise Exception('All feeds are empty, aborting.')

        if len(feeds) > 1:
            for i, f in enumerate(feeds):
                entries.append('feed_%d/index.html' % i)
                po = self.play_order_map.get(entries[-1], None)
                if po is None:
                    self.play_order_counter += 1
                    po = self.play_order_counter
                auth = getattr(f, 'author', None)
                if not auth:
                    auth = None
                desc = getattr(f, 'description', None)
                if not desc:
                    desc = None
                feed_index(i, toc.add_item('feed_%d/index.html' % i, None,
                                           f.title, play_order=po, description=desc, author=auth))
        else:
            entries.append('feed_%d/index.html' % 0)
            feed_index(0, toc)

        for i, p in enumerate(entries):
            entries[i] = os.path.join(dir, p.replace('/', os.sep))
        opf.create_spine(entries)
        opf.set_toc(toc)

        with nested(open(opf_path, 'wb'), open(ncx_path, 'wb')) as (opf_file, ncx_file):
            opf.render(opf_file, ncx_file)
@ -4,7 +4,7 @@ __license__ = 'GPL v3'
|
||||
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
|
||||
__docformat__ = 'restructuredtext en'
|
||||
__appname__ = u'calibre'
|
||||
numeric_version = (0, 8, 6)
|
||||
numeric_version = (0, 8, 7)
|
||||
__version__ = u'.'.join(map(unicode, numeric_version))
|
||||
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"
|
||||
|
||||
|
@ -1148,7 +1148,7 @@ plugins += [LookAndFeel, Behavior, Columns, Toolbar, Search, InputOptions,
|
||||
class StoreAmazonKindleStore(StoreBase):
|
||||
name = 'Amazon Kindle'
|
||||
description = u'Kindle books from Amazon.'
|
||||
actual_plugin = 'calibre.gui2.store.amazon_plugin:AmazonKindleStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.amazon_plugin:AmazonKindleStore'
|
||||
|
||||
headquarters = 'US'
|
||||
formats = ['KINDLE']
|
||||
@ -1158,7 +1158,7 @@ class StoreAmazonDEKindleStore(StoreBase):
|
||||
name = 'Amazon DE Kindle'
|
||||
author = 'Charles Haley'
|
||||
description = u'Kindle Bücher von Amazon.'
|
||||
actual_plugin = 'calibre.gui2.store.amazon_de_plugin:AmazonDEKindleStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.amazon_de_plugin:AmazonDEKindleStore'
|
||||
|
||||
headquarters = 'DE'
|
||||
formats = ['KINDLE']
|
||||
@ -1168,7 +1168,7 @@ class StoreAmazonUKKindleStore(StoreBase):
|
||||
name = 'Amazon UK Kindle'
|
||||
author = 'Charles Haley'
|
||||
description = u'Kindle books from Amazon\'s UK web site. Also, includes French language ebooks.'
|
||||
actual_plugin = 'calibre.gui2.store.amazon_uk_plugin:AmazonUKKindleStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.amazon_uk_plugin:AmazonUKKindleStore'
|
||||
|
||||
headquarters = 'UK'
|
||||
formats = ['KINDLE']
|
||||
@ -1177,7 +1177,7 @@ class StoreAmazonUKKindleStore(StoreBase):
|
||||
class StoreArchiveOrgStore(StoreBase):
|
||||
name = 'Archive.org'
|
||||
description = u'An Internet library offering permanent access for researchers, historians, scholars, people with disabilities, and the general public to historical collections that exist in digital format.'
|
||||
actual_plugin = 'calibre.gui2.store.archive_org_plugin:ArchiveOrgStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.archive_org_plugin:ArchiveOrgStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1186,7 +1186,7 @@ class StoreArchiveOrgStore(StoreBase):
|
||||
class StoreBaenWebScriptionStore(StoreBase):
|
||||
name = 'Baen WebScription'
|
||||
description = u'Sci-Fi & Fantasy brought to you by Jim Baen.'
|
||||
actual_plugin = 'calibre.gui2.store.baen_webscription_plugin:BaenWebScriptionStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.baen_webscription_plugin:BaenWebScriptionStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1195,7 +1195,7 @@ class StoreBaenWebScriptionStore(StoreBase):
|
||||
class StoreBNStore(StoreBase):
|
||||
name = 'Barnes and Noble'
|
||||
description = u'The world\'s largest book seller. As the ultimate destination for book lovers, Barnes & Noble.com offers an incredible array of content.'
|
||||
actual_plugin = 'calibre.gui2.store.bn_plugin:BNStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.bn_plugin:BNStore'
|
||||
|
||||
headquarters = 'US'
|
||||
formats = ['NOOK']
|
||||
@ -1205,7 +1205,7 @@ class StoreBeamEBooksDEStore(StoreBase):
|
||||
name = 'Beam EBooks DE'
|
||||
author = 'Charles Haley'
|
||||
description = u'Bei uns finden Sie: Tausende deutschsprachige eBooks; Alle eBooks ohne hartes DRM; PDF, ePub und Mobipocket Format; Sofortige Verfügbarkeit - 24 Stunden am Tag; Günstige Preise; eBooks für viele Lesegeräte, PC,Mac und Smartphones; Viele Gratis eBooks'
|
||||
actual_plugin = 'calibre.gui2.store.beam_ebooks_de_plugin:BeamEBooksDEStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.beam_ebooks_de_plugin:BeamEBooksDEStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'DE'
|
||||
@ -1215,7 +1215,7 @@ class StoreBeamEBooksDEStore(StoreBase):
|
||||
class StoreBeWriteStore(StoreBase):
|
||||
name = 'BeWrite Books'
|
||||
description = u'Publishers of fine books. Highly selective and editorially driven. Does not offer: books for children or exclusively YA, erotica, swords-and-sorcery fantasy and space-opera-style science fiction. All other genres are represented.'
|
||||
actual_plugin = 'calibre.gui2.store.bewrite_plugin:BeWriteStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.bewrite_plugin:BeWriteStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1224,7 +1224,7 @@ class StoreBeWriteStore(StoreBase):
|
||||
class StoreDieselEbooksStore(StoreBase):
|
||||
name = 'Diesel eBooks'
|
||||
description = u'Instant access to over 2.4 million titles from hundreds of publishers including Harlequin, HarperCollins, John Wiley & Sons, McGraw-Hill, Simon & Schuster and Random House.'
|
||||
actual_plugin = 'calibre.gui2.store.diesel_ebooks_plugin:DieselEbooksStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.diesel_ebooks_plugin:DieselEbooksStore'
|
||||
|
||||
headquarters = 'US'
|
||||
formats = ['EPUB', 'PDF']
|
||||
@ -1233,7 +1233,7 @@ class StoreDieselEbooksStore(StoreBase):
|
||||
class StoreEbookscomStore(StoreBase):
|
||||
name = 'eBooks.com'
|
||||
description = u'Sells books in multiple electronic formats in all categories. Technical infrastructure is cutting edge, robust and scalable, with servers in the US and Europe.'
|
||||
actual_plugin = 'calibre.gui2.store.ebooks_com_plugin:EbookscomStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.ebooks_com_plugin:EbookscomStore'
|
||||
|
||||
headquarters = 'US'
|
||||
formats = ['EPUB', 'LIT', 'MOBI', 'PDF']
|
||||
@ -1243,7 +1243,7 @@ class StoreEPubBuyDEStore(StoreBase):
|
||||
name = 'EPUBBuy DE'
|
||||
author = 'Charles Haley'
|
||||
description = u'Bei EPUBBuy.com finden Sie ausschliesslich eBooks im weitverbreiteten EPUB-Format und ohne DRM. So haben Sie die freie Wahl, wo Sie Ihr eBook lesen: Tablet, eBook-Reader, Smartphone oder einfach auf Ihrem PC. So macht eBook-Lesen Spaß!'
|
||||
actual_plugin = 'calibre.gui2.store.epubbuy_de_plugin:EPubBuyDEStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.epubbuy_de_plugin:EPubBuyDEStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'DE'
|
||||
@ -1254,7 +1254,7 @@ class StoreEBookShoppeUKStore(StoreBase):
|
||||
name = 'ebookShoppe UK'
|
||||
author = u'Charles Haley'
|
||||
description = u'We made this website in an attempt to offer the widest range of UK eBooks possible across and as many formats as we could manage.'
|
||||
actual_plugin = 'calibre.gui2.store.ebookshoppe_uk_plugin:EBookShoppeUKStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.ebookshoppe_uk_plugin:EBookShoppeUKStore'
|
||||
|
||||
headquarters = 'UK'
|
||||
formats = ['EPUB', 'PDF']
|
||||
@ -1263,7 +1263,7 @@ class StoreEBookShoppeUKStore(StoreBase):
|
||||
class StoreEHarlequinStore(StoreBase):
|
||||
name = 'eHarlequin'
|
||||
description = u'A global leader in series romance and one of the world\'s leading publishers of books for women. Offers women a broad range of reading from romance to bestseller fiction, from young adult novels to erotic literature, from nonfiction to fantasy, from African-American novels to inspirational romance, and more.'
|
||||
actual_plugin = 'calibre.gui2.store.eharlequin_plugin:EHarlequinStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.eharlequin_plugin:EHarlequinStore'
|
||||
|
||||
headquarters = 'CA'
|
||||
formats = ['EPUB', 'PDF']
|
||||
@ -1272,7 +1272,7 @@ class StoreEHarlequinStore(StoreBase):
|
||||
class StoreEpubBudStore(StoreBase):
|
||||
name = 'ePub Bud'
|
||||
description = 'Well, it\'s pretty much just "YouTube for Children\'s eBooks. A not-for-profit organization devoted to brining self published childrens books to the world.'
|
||||
actual_plugin = 'calibre.gui2.store.epubbud_plugin:EpubBudStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.epubbud_plugin:EpubBudStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1281,7 +1281,7 @@ class StoreEpubBudStore(StoreBase):
|
||||
class StoreFeedbooksStore(StoreBase):
|
||||
name = 'Feedbooks'
|
||||
description = u'Feedbooks is a cloud publishing and distribution service, connected to a large ecosystem of reading systems and social networks. Provides a variety of genres from independent and classic books.'
|
||||
actual_plugin = 'calibre.gui2.store.feedbooks_plugin:FeedbooksStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.feedbooks_plugin:FeedbooksStore'
|
||||
|
||||
headquarters = 'FR'
|
||||
formats = ['EPUB', 'MOBI', 'PDF']
|
||||
@ -1290,7 +1290,7 @@ class StoreFoylesUKStore(StoreBase):
|
||||
name = 'Foyles UK'
|
||||
author = 'Charles Haley'
|
||||
description = u'Foyles of London\'s ebook store. Provides extensive range covering all subjects.'
|
||||
actual_plugin = 'calibre.gui2.store.foyles_uk_plugin:FoylesUKStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.foyles_uk_plugin:FoylesUKStore'
|
||||
|
||||
headquarters = 'UK'
|
||||
formats = ['EPUB', 'PDF']
|
||||
@ -1300,7 +1300,7 @@ class StoreGandalfStore(StoreBase):
|
||||
name = 'Gandalf'
|
||||
author = u'Tomasz Długosz'
|
||||
description = u'Księgarnia internetowa Gandalf.'
|
||||
actual_plugin = 'calibre.gui2.store.gandalf_plugin:GandalfStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.gandalf_plugin:GandalfStore'
|
||||
|
||||
headquarters = 'PL'
|
||||
formats = ['EPUB', 'PDF']
|
||||
@ -1308,7 +1308,7 @@ class StoreGandalfStore(StoreBase):
|
||||
class StoreGoogleBooksStore(StoreBase):
|
||||
name = 'Google Books'
|
||||
description = u'Google Books'
|
||||
actual_plugin = 'calibre.gui2.store.google_books_plugin:GoogleBooksStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.google_books_plugin:GoogleBooksStore'
|
||||
|
||||
headquarters = 'US'
|
||||
formats = ['EPUB', 'PDF', 'TXT']
|
||||
@ -1316,7 +1316,7 @@ class StoreGoogleBooksStore(StoreBase):
|
||||
class StoreGutenbergStore(StoreBase):
|
||||
name = 'Project Gutenberg'
|
||||
description = u'The first producer of free ebooks. Free in the United States because their copyright has expired. They may not be free of copyright in other countries. Readers outside of the United States must check the copyright laws of their countries before downloading or redistributing our ebooks.'
|
||||
actual_plugin = 'calibre.gui2.store.gutenberg_plugin:GutenbergStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.gutenberg_plugin:GutenbergStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1325,7 +1325,7 @@ class StoreGutenbergStore(StoreBase):
|
||||
class StoreKoboStore(StoreBase):
|
||||
name = 'Kobo'
|
||||
description = u'With over 2.3 million eBooks to browse we have engaged readers in over 200 countries in Kobo eReading. Our eBook listings include New York Times Bestsellers, award winners, classics and more!'
|
||||
actual_plugin = 'calibre.gui2.store.kobo_plugin:KoboStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.kobo_plugin:KoboStore'
|
||||
|
||||
headquarters = 'CA'
|
||||
formats = ['EPUB']
|
||||
@ -1335,7 +1335,7 @@ class StoreLegimiStore(StoreBase):
|
||||
name = 'Legimi'
|
||||
author = u'Tomasz Długosz'
|
||||
description = u'Tanie oraz darmowe ebooki, egazety i blogi w formacie EPUB, wprost na Twój e-czytnik, iPhone, iPad, Android i komputer'
|
||||
actual_plugin = 'calibre.gui2.store.legimi_plugin:LegimiStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.legimi_plugin:LegimiStore'
|
||||
|
||||
headquarters = 'PL'
|
||||
formats = ['EPUB']
|
||||
@ -1344,7 +1344,7 @@ class StoreLibreDEStore(StoreBase):
|
||||
name = 'Libri DE'
|
||||
author = 'Charles Haley'
|
||||
description = u'Sicher Bücher, Hörbücher und Downloads online bestellen.'
|
||||
actual_plugin = 'calibre.gui2.store.libri_de_plugin:LibreDEStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.libri_de_plugin:LibreDEStore'
|
||||
|
||||
headquarters = 'DE'
|
||||
formats = ['EPUB', 'PDF']
|
||||
@ -1353,7 +1353,7 @@ class StoreLibreDEStore(StoreBase):
|
||||
class StoreManyBooksStore(StoreBase):
|
||||
name = 'ManyBooks'
|
||||
description = u'Public domain and creative commons works from many sources.'
|
||||
actual_plugin = 'calibre.gui2.store.manybooks_plugin:ManyBooksStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.manybooks_plugin:ManyBooksStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1362,7 +1362,7 @@ class StoreManyBooksStore(StoreBase):
|
||||
class StoreMobileReadStore(StoreBase):
|
||||
name = 'MobileRead'
|
||||
description = u'Ebooks handcrafted with the utmost care.'
|
||||
actual_plugin = 'calibre.gui2.store.mobileread.mobileread_plugin:MobileReadStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.mobileread.mobileread_plugin:MobileReadStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'CH'
|
||||
@ -1372,16 +1372,24 @@ class StoreNextoStore(StoreBase):
|
||||
name = 'Nexto'
|
||||
author = u'Tomasz Długosz'
|
||||
description = u'Największy w Polsce sklep internetowy z audiobookami mp3, ebookami pdf oraz prasą do pobrania on-line.'
|
||||
actual_plugin = 'calibre.gui2.store.nexto_plugin:NextoStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.nexto_plugin:NextoStore'
|
||||
|
||||
headquarters = 'PL'
|
||||
formats = ['EPUB', 'PDF']
|
||||
affiliate = True
|
||||
|
||||
class StoreOpenBooksStore(StoreBase):
|
||||
name = 'Open Books'
|
||||
description = u'Comprehensive listing of DRM free ebooks from a variety of sources provided by users of calibre.'
|
||||
actual_plugin = 'calibre.gui2.store.stores.open_books_plugin:OpenBooksStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
|
||||
class StoreOpenLibraryStore(StoreBase):
|
||||
name = 'Open Library'
|
||||
description = u'One web page for every book ever published. The goal is to be a true online library. Over 20 million records from a variety of large catalogs as well as single contributions, with more on the way.'
|
||||
actual_plugin = 'calibre.gui2.store.open_library_plugin:OpenLibraryStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.open_library_plugin:OpenLibraryStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1390,7 +1398,7 @@ class StoreOpenLibraryStore(StoreBase):
|
||||
class StoreOReillyStore(StoreBase):
|
||||
name = 'OReilly'
|
||||
description = u'Programming and tech ebooks from OReilly.'
|
||||
actual_plugin = 'calibre.gui2.store.oreilly_plugin:OReillyStore'
|
||||
actual_plugin = 'calibre.gui2.store.stores.oreilly_plugin:OReillyStore'
|
||||
|
||||
drm_free_only = True
|
||||
headquarters = 'US'
|
||||
@ -1399,7 +1407,7 @@ class StoreOReillyStore(StoreBase):

class StorePragmaticBookshelfStore(StoreBase):
    name = 'Pragmatic Bookshelf'
    description = u'The Pragmatic Bookshelf\'s collection of programming and tech books available as ebooks.'
    actual_plugin = 'calibre.gui2.store.pragmatic_bookshelf_plugin:PragmaticBookshelfStore'
    actual_plugin = 'calibre.gui2.store.stores.pragmatic_bookshelf_plugin:PragmaticBookshelfStore'

    drm_free_only = True
    headquarters = 'US'

@ -1408,7 +1416,7 @@ class StorePragmaticBookshelfStore(StoreBase):

class StoreSmashwordsStore(StoreBase):
    name = 'Smashwords'
    description = u'An ebook publishing and distribution platform for ebook authors, publishers and readers. Covers many genres and formats.'
    actual_plugin = 'calibre.gui2.store.smashwords_plugin:SmashwordsStore'
    actual_plugin = 'calibre.gui2.store.stores.smashwords_plugin:SmashwordsStore'

    drm_free_only = True
    headquarters = 'US'

@ -1419,7 +1427,7 @@ class StoreVirtualoStore(StoreBase):
    name = 'Virtualo'
    author = u'Tomasz Długosz'
    description = u'Księgarnia internetowa, która oferuje bezpieczny i szeroki dostęp do książek w formie cyfrowej.'
    actual_plugin = 'calibre.gui2.store.virtualo_plugin:VirtualoStore'
    actual_plugin = 'calibre.gui2.store.stores.virtualo_plugin:VirtualoStore'

    headquarters = 'PL'
    formats = ['EPUB', 'PDF']

@ -1428,7 +1436,7 @@ class StoreWaterstonesUKStore(StoreBase):
    name = 'Waterstones UK'
    author = 'Charles Haley'
    description = u'Waterstone\'s mission is to be the leading Bookseller on the High Street and online providing customers the widest choice, great value and expert advice from a team passionate about Bookselling.'
    actual_plugin = 'calibre.gui2.store.waterstones_uk_plugin:WaterstonesUKStore'
    actual_plugin = 'calibre.gui2.store.stores.waterstones_uk_plugin:WaterstonesUKStore'

    headquarters = 'UK'
    formats = ['EPUB', 'PDF']

@ -1436,7 +1444,7 @@ class StoreWaterstonesUKStore(StoreBase):

class StoreWeightlessBooksStore(StoreBase):
    name = 'Weightless Books'
    description = u'An independent DRM-free ebooksite devoted to ebooks of all sorts.'
    actual_plugin = 'calibre.gui2.store.weightless_books_plugin:WeightlessBooksStore'
    actual_plugin = 'calibre.gui2.store.stores.weightless_books_plugin:WeightlessBooksStore'

    drm_free_only = True
    headquarters = 'US'

@ -1446,7 +1454,7 @@ class StoreWHSmithUKStore(StoreBase):
    name = 'WH Smith UK'
    author = 'Charles Haley'
    description = u"Shop for savings on Books, discounted Magazine subscriptions and great prices on Stationery, Toys & Games"
    actual_plugin = 'calibre.gui2.store.whsmith_uk_plugin:WHSmithUKStore'
    actual_plugin = 'calibre.gui2.store.stores.whsmith_uk_plugin:WHSmithUKStore'

    headquarters = 'UK'
    formats = ['EPUB', 'PDF']

@ -1454,7 +1462,7 @@ class StoreWHSmithUKStore(StoreBase):

class StoreWizardsTowerBooksStore(StoreBase):
    name = 'Wizards Tower Books'
    description = u'A science fiction and fantasy publisher. Concentrates mainly on making out-of-print works available once more as e-books, and helping other small presses exploit the e-book market. Also publishes a small number of limited-print-run anthologies with a view to encouraging diversity in the science fiction and fantasy field.'
    actual_plugin = 'calibre.gui2.store.wizards_tower_books_plugin:WizardsTowerBooksStore'
    actual_plugin = 'calibre.gui2.store.stores.wizards_tower_books_plugin:WizardsTowerBooksStore'

    drm_free_only = True
    headquarters = 'UK'

@ -1464,7 +1472,7 @@ class StoreWoblinkStore(StoreBase):
    name = 'Woblink'
    author = u'Tomasz Długosz'
    description = u'Czytanie zdarza się wszędzie!'
    actual_plugin = 'calibre.gui2.store.woblink_plugin:WoblinkStore'
    actual_plugin = 'calibre.gui2.store.stores.woblink_plugin:WoblinkStore'

    headquarters = 'PL'
    formats = ['EPUB']

@ -1473,7 +1481,7 @@ class StoreZixoStore(StoreBase):
    name = 'Zixo'
    author = u'Tomasz Długosz'
    description = u'Księgarnia z ebookami oraz książkami audio. Aby otwierać książki w formacie Zixo należy zainstalować program dostępny na stronie księgarni. Umożliwia on m.in. dodawanie zakładek i dostosowywanie rozmiaru czcionki.'
    actual_plugin = 'calibre.gui2.store.zixo_plugin:ZixoStore'
    actual_plugin = 'calibre.gui2.store.stores.zixo_plugin:ZixoStore'

    headquarters = 'PL'
    formats = ['PDF, ZIXO']

@ -1504,6 +1512,7 @@ plugins += [
    StoreManyBooksStore,
    StoreMobileReadStore,
    StoreNextoStore,
    StoreOpenBooksStore,
    StoreOpenLibraryStore,
    StoreOReillyStore,
    StorePragmaticBookshelfStore,
@ -45,8 +45,11 @@ class ANDROID(USBMS):
            0xfce : { 0xd12e : [0x0100]},

            # Google
            0x18d1 : { 0x4e11 : [0x0100, 0x226, 0x227], 0x4e12: [0x0100, 0x226,
                0x227], 0x4e21: [0x0100, 0x226, 0x227], 0xb058: [0x0222]},
            0x18d1 : {
                0x4e11 : [0x0100, 0x226, 0x227],
                0x4e12: [0x0100, 0x226, 0x227],
                0x4e21: [0x0100, 0x226, 0x227],
                0xb058: [0x0222, 0x226, 0x227]},

            # Samsung
            0x04e8 : { 0x681d : [0x0222, 0x0223, 0x0224, 0x0400],

@ -107,7 +110,7 @@ class ANDROID(USBMS):
    VENDOR_NAME = ['HTC', 'MOTOROLA', 'GOOGLE_', 'ANDROID', 'ACER',
            'GT-I5700', 'SAMSUNG', 'DELL', 'LINUX', 'GOOGLE', 'ARCHOS',
            'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA',
            'GENERIC-', 'ZTE']
            'GENERIC-', 'ZTE', 'MID']
    WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'INC.NEXUS_ONE',
            '__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
            'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID',

@ -116,7 +119,7 @@ class ANDROID(USBMS):
            'IDEOS_TABLET', 'MYTOUCH_4G', 'UMS_COMPOSITE', 'SCH-I800_CARD',
            '7', 'A956', 'A955', 'A43', 'ANDROID_PLATFORM', 'TEGRA_2',
            'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE', 'A7EB', 'STREAK',
            'MB525']
            'MB525', 'ANDROID2.3']
    WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
            'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
            'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
@ -20,11 +20,11 @@ class IRIVER_STORY(USBMS):
    FORMATS = ['epub', 'fb2', 'pdf', 'djvu', 'txt']

    VENDOR_ID = [0x1006]
    PRODUCT_ID = [0x4023, 0x4024, 0x4025]
    BCD = [0x0323]
    PRODUCT_ID = [0x4023, 0x4024, 0x4025, 0x4034]
    BCD = [0x0323, 0x0326]

    VENDOR_NAME = 'IRIVER'
    WINDOWS_MAIN_MEM = ['STORY', 'STORY_EB05', 'STORY_WI-FI']
    WINDOWS_MAIN_MEM = ['STORY', 'STORY_EB05', 'STORY_WI-FI', 'STORY_EB07']
    WINDOWS_CARD_A_MEM = ['STORY', 'STORY_SD']

    #OSX_MAIN_MEM = 'Kindle Internal Storage Media'
@ -14,7 +14,7 @@ from calibre.constants import preferred_encoding
from calibre import isbytestring, force_unicode
from calibre.utils.config import prefs, tweaks
from calibre.utils.icu import strcmp
from calibre.utils.formatter import eval_formatter
from calibre.utils.formatter import EvalFormatter

class Book(Metadata):
    def __init__(self, prefix, lpath, size=None, other=None):

@ -116,7 +116,7 @@ class CollectionsBookList(BookList):
                field_name = field_meta['name']
            else:
                field_name = ''
            cat_name = eval_formatter.safe_format(
            cat_name = EvalFormatter().safe_format(
                            fmt=tweaks['sony_collection_name_template'],
                            kwargs={'category':field_name, 'value':field_value},
                            error_value='GET_CATEGORY', book=None)
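            # A fresh EvalFormatter instance per call avoids sharing one
            # formatter object across threads; the shared module-level
            # formatter this replaces is not thread safe.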
@ -351,7 +351,9 @@ class ComicInput(InputFormatPlugin):
        comics = []
        with CurrentDir(tdir):
            if not os.path.exists('comics.txt'):
                raise ValueError('%s is not a valid comic collection'
                raise ValueError((
                    '%s is not a valid comic collection'
                    ' no comics.txt was found in the file')
                        %stream.name)
            raw = open('comics.txt', 'rb').read()
            if raw.startswith(codecs.BOM_UTF16_BE):
@ -17,6 +17,8 @@ def decompress_doc(data):
    return cPalmdoc.decompress(data)

def compress_doc(data):
    if not data:
        return u''
    return cPalmdoc.compress(data)

def test():
@ -59,6 +59,8 @@ class CompositeProgressReporter(object):
                (self.global_max - self.global_min)
        self.global_reporter(global_frac, msg)

ARCHIVE_FMTS = ('zip', 'rar', 'oebzip')
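# Container formats that must be unpacked before conversion; see the
# 'Processing archive...' step further down.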

class Plumber(object):
    '''
    The `Plumber` manages the conversion pipeline. A UI should call the methods

@ -594,7 +596,7 @@ OptionRecommendation(name='sr3_replace',
            raise ValueError('Input file must have an extension')
        input_fmt = input_fmt[1:].lower()
        self.archive_input_tdir = None
        if input_fmt in ('zip', 'rar', 'oebzip'):
        if input_fmt in ARCHIVE_FMTS:
            self.log('Processing archive...')
            tdir = PersistentTemporaryDirectory('_plumber_archive')
            self.input, input_fmt = self.unarchive(self.input, tdir)
@ -71,6 +71,7 @@ class SafeFormat(TemplateFormatter):
                return ''
            return v

# DEPRECATED. This is not thread safe. Do not use.
composite_formatter = SafeFormat()

class Metadata(object):

@ -111,6 +112,7 @@ class Metadata(object):
        # List of strings or []
        self.author = list(authors) if authors else []# Needed for backward compatibility
        self.authors = list(authors) if authors else []
        self.formatter = SafeFormat()

    def is_null(self, field):
        '''

@ -147,7 +149,7 @@ class Metadata(object):
                return val
            if val is None:
                d['#value#'] = 'RECURSIVE_COMPOSITE FIELD (Metadata) ' + field
                val = d['#value#'] = composite_formatter.safe_format(
                val = d['#value#'] = self.formatter.safe_format(
                                            d['display']['composite_template'],
                                            self,
                                            _('TEMPLATE ERROR'),

@ -424,11 +426,12 @@ class Metadata(object):
        '''
        if not ops:
            return
        formatter = SafeFormat()
        for op in ops:
            try:
                src = op[0]
                dest = op[1]
                val = composite_formatter.safe_format\
                val = formatter.safe_format\
                                    (src, other, 'PLUGBOARD TEMPLATE ERROR', other)
                if dest == 'tags':
                    self.set(dest, [f.strip() for f in val.split(',') if f.strip()])
@ -7,12 +7,13 @@ from urllib import unquote
from PyQt4.Qt import (QVariant, QFileInfo, QObject, SIGNAL, QBuffer, Qt,
                      QByteArray, QTranslator, QCoreApplication, QThread,
                      QEvent, QTimer, pyqtSignal, QDate, QDesktopServices,
                      QFileDialog, QFileIconProvider,
                      QFileDialog, QFileIconProvider, QSettings,
                      QIcon, QApplication, QDialog, QUrl, QFont)

ORG_NAME = 'KovidsBrain'
APP_UID = 'libprs500'
from calibre.constants import islinux, iswindows, isbsd, isfrozen, isosx
from calibre.constants import (islinux, iswindows, isbsd, isfrozen, isosx,
        config_dir)
from calibre.utils.config import Config, ConfigProxy, dynamic, JSONConfig
from calibre.utils.localization import set_qt_translator
from calibre.ebooks.metadata import MetaInformation

@ -192,6 +193,11 @@ def _config(): # {{{
config = _config()
# }}}

QSettings.setPath(QSettings.IniFormat, QSettings.UserScope, config_dir)
QSettings.setPath(QSettings.IniFormat, QSettings.SystemScope,
        config_dir)
QSettings.setDefaultFormat(QSettings.IniFormat)
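# With the paths set above, GUI components that use QSettings write their ini
# files under calibre's config_dir instead of the platform default locations.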

# Turn off DeprecationWarnings in windows GUI
if iswindows:
    import warnings
@ -5,7 +5,7 @@ __license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os, shutil
import os
from functools import partial

from PyQt4.Qt import QMenu, Qt, QInputDialog, QToolButton

@ -14,7 +14,7 @@ from calibre import isbytestring
from calibre.constants import filesystem_encoding, iswindows
from calibre.utils.config import prefs
from calibre.gui2 import (gprefs, warning_dialog, Dispatcher, error_dialog,
    question_dialog, info_dialog)
    question_dialog, info_dialog, open_local_file)
from calibre.library.database2 import LibraryDatabase2
from calibre.gui2.actions import InterfaceAction

@ -107,7 +107,7 @@ class ChooseLibraryAction(InterfaceAction):
        self.quick_menu_action = self.choose_menu.addMenu(self.quick_menu)
        self.rename_menu = QMenu(_('Rename library'))
        self.rename_menu_action = self.choose_menu.addMenu(self.rename_menu)
        self.delete_menu = QMenu(_('Delete library'))
        self.delete_menu = QMenu(_('Remove library'))
        self.delete_menu_action = self.choose_menu.addMenu(self.delete_menu)

        ac = self.create_action(spec=(_('Pick a random book'), 'catalog.png',

@ -252,22 +252,15 @@ class ChooseLibraryAction(InterfaceAction):

    def delete_requested(self, name, location):
        loc = location.replace('/', os.sep)
        if not question_dialog(self.gui, _('Are you sure?'),
                _('<h1 style="color:red">WARNING</h1>')+
                _('<b style="color: red">All files</b> (not just ebooks) '
                    'from <br><br><b>%s</b><br><br> will be '
                    '<b>permanently deleted</b>. Are you sure?') % loc,
                show_copy_button=False, default_yes=False):
            return
        exists = self.gui.library_view.model().db.exists_at(loc)
        if exists:
            try:
                shutil.rmtree(loc, ignore_errors=True)
            except:
                pass
        self.stats.remove(location)
        self.build_menus()
        self.gui.iactions['Copy To Library'].build_menus()
        info_dialog(self.gui, _('Library removed'),
                _('The library %s has been removed from calibre. '
                    'The files remain on your computer, if you want '
                    'to delete them, you will have to do so manually.') % loc,
                show=True)
        open_local_file(loc)

    def backup_status(self, location):
        dirty_text = 'no'
@ -139,7 +139,12 @@ class RegexBuilder(QDialog, Ui_RegexBuilder):
            try:
                self.open_book(fpath)
            finally:
                os.remove(fpath)
                try:
                    os.remove(fpath)
                except:
                    # Fails on windows if the input plugin for this format keeps the file open
                    # Happens for LIT files
                    pass
        return True

    def open_book(self, pathtoebook):

@ -148,7 +153,8 @@ class RegexBuilder(QDialog, Ui_RegexBuilder):
        text = [u'']
        preprocessor = HTMLPreProcessor(None, False)
        for path in self.iterator.spine:
            html = open(path, 'rb').read().decode('utf-8', 'replace')
            with open(path, 'rb') as f:
                html = f.read().decode('utf-8', 'replace')
            html = preprocessor(html, get_preprocess_html=True)
            text.append(html)
        self.preview.setPlainText('\n---\n'.join(text))
@ -11,8 +11,8 @@ import sys, cPickle, shutil, importlib
from PyQt4.Qt import QString, SIGNAL, QAbstractListModel, Qt, QVariant, QFont

from calibre.gui2 import ResizableDialog, NONE
from calibre.ebooks.conversion.config import GuiRecommendations, save_specifics, \
    load_specifics
from calibre.ebooks.conversion.config import (GuiRecommendations, save_specifics,
    load_specifics)
from calibre.gui2.convert.single_ui import Ui_Dialog
from calibre.gui2.convert.metadata import MetadataWidget
from calibre.gui2.convert.look_and_feel import LookAndFeelWidget

@ -24,7 +24,8 @@ from calibre.gui2.convert.toc import TOCWidget
from calibre.gui2.convert.debug import DebugWidget


from calibre.ebooks.conversion.plumber import Plumber, supported_input_formats
from calibre.ebooks.conversion.plumber import (Plumber,
    supported_input_formats, ARCHIVE_FMTS)
from calibre.ebooks.conversion.config import delete_specifics
from calibre.customize.ui import available_output_formats
from calibre.customize.conversion import OptionRecommendation

@ -158,7 +159,10 @@ class Config(ResizableDialog, Ui_Dialog):
        output_path = 'dummy.'+output_format
        log = Log()
        log.outputs = []
        self.plumber = Plumber('dummy.'+input_format, output_path, log)
        input_file = 'dummy.'+input_format
        if input_format in ARCHIVE_FMTS:
            input_file = 'dummy.html'
        self.plumber = Plumber(input_file, output_path, log)

        def widget_factory(cls):
            return cls(self.stack, self.plumber.get_option_by_name,
@ -12,7 +12,7 @@ from PyQt4.Qt import Qt, QDialog, QGridLayout, QVBoxLayout, QFont, QLabel, \
from calibre.gui2.dialogs.metadata_bulk_ui import Ui_MetadataBulkDialog
from calibre.gui2.dialogs.tag_editor import TagEditor
from calibre.ebooks.metadata import string_to_authors, authors_to_string, title_sort
from calibre.ebooks.metadata.book.base import composite_formatter
from calibre.ebooks.metadata.book.base import SafeFormat
from calibre.gui2.custom_column_widgets import populate_metadata_page
from calibre.gui2 import error_dialog, ResizableDialog, UNDEFINED_QDATE, \
    gprefs, question_dialog

@ -499,7 +499,7 @@ class MetadataBulkDialog(ResizableDialog, Ui_MetadataBulkDialog):
    def s_r_get_field(self, mi, field):
        if field:
            if field == '{template}':
                v = composite_formatter.safe_format\
                v = SafeFormat().safe_format\
                    (unicode(self.s_r_template.text()), mi, _('S/R TEMPLATE ERROR'), mi)
                return [v]
            fm = self.db.metadata_for_field(field)
@ -11,7 +11,7 @@ from PyQt4.Qt import (Qt, QDialog, QDialogButtonBox, QSyntaxHighlighter, QFont,
from calibre.gui2 import error_dialog
from calibre.gui2.dialogs.template_dialog_ui import Ui_TemplateDialog
from calibre.utils.formatter_functions import formatter_functions
from calibre.ebooks.metadata.book.base import composite_formatter, Metadata
from calibre.ebooks.metadata.book.base import SafeFormat, Metadata
from calibre.library.coloring import (displayable_columns)


@ -270,7 +270,7 @@ class TemplateDialog(QDialog, Ui_TemplateDialog):
            self.highlighter.regenerate_paren_positions()
            self.text_cursor_changed()
            self.template_value.setText(
                composite_formatter.safe_format(cur_text, self.mi,
                SafeFormat().safe_format(cur_text, self.mi,
                    _('EXCEPTION: '), self.mi))

    def text_cursor_changed(self):
@ -16,7 +16,7 @@ from calibre.constants import isosx, __appname__, preferred_encoding, \
from calibre.gui2 import config, is_widescreen, gprefs
from calibre.gui2.library.views import BooksView, DeviceBooksView
from calibre.gui2.widgets import Splitter
from calibre.gui2.tag_view import TagBrowserWidget
from calibre.gui2.tag_browser.ui import TagBrowserWidget
from calibre.gui2.book_details import BookDetails
from calibre.gui2.notify import get_notifier
@ -14,7 +14,7 @@ from PyQt4.Qt import (QAbstractTableModel, Qt, pyqtSignal, QIcon, QImage,
from calibre.gui2 import NONE, UNDEFINED_QDATE
from calibre.utils.pyparsing import ParseException
from calibre.ebooks.metadata import fmt_sidx, authors_to_string, string_to_authors
from calibre.ebooks.metadata.book.base import composite_formatter
from calibre.ebooks.metadata.book.base import SafeFormat
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.config import tweaks, prefs
from calibre.utils.date import dt_factory, qt_to_dt

@ -92,6 +92,7 @@ class BooksModel(QAbstractTableModel): # {{{
        self.highlight_only = False
        self.current_row = -1
        self.colors = frozenset([unicode(c) for c in QColor.colorNames()])
        self.formatter = SafeFormat()
        self.read_config()

    def change_alignment(self, colname, alignment):

@ -717,7 +718,7 @@ class BooksModel(QAbstractTableModel): # {{{
        try:
            if mi is None:
                mi = self.db.get_metadata(id_, index_is_id=True)
            color = composite_formatter.safe_format(fmt, mi, '', mi)
            color = self.formatter.safe_format(fmt, mi, '', mi)
            if color in self.colors:
                color = QColor(color)
                if color.isValid():
@ -173,9 +173,20 @@ class TitleSortEdit(TitleEdit):

    def auto_generate(self, *args):
        self.current_val = title_sort(self.title_edit.current_val)
        self.title_edit.textChanged.disconnect()
        self.textChanged.disconnect()
        self.autogen_button.clicked.disconnect()

    def break_cycles(self):
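        # Each disconnect below is wrapped in try/except: calling disconnect()
        # on a signal that has no remaining connections raises, and
        # break_cycles() must not fail part way through.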
        try:
            self.title_edit.textChanged.disconnect()
        except:
            pass
        try:
            self.textChanged.disconnect()
        except:
            pass
        try:
            self.autogen_button.clicked.disconnect()
        except:
            pass

    # }}}

@ -280,7 +291,10 @@ class AuthorsEdit(MultiCompleteComboBox):

    def break_cycles(self):
        self.db = self.dialog = None
        self.manage_authors_signal.triggered.disconnect()
        try:
            self.manage_authors_signal.triggered.disconnect()
        except:
            pass

class AuthorSortEdit(EnLineEdit):

@ -387,11 +401,26 @@ class AuthorSortEdit(EnLineEdit):

    def break_cycles(self):
        self.db = None
        self.authors_edit.editTextChanged.disconnect()
        self.textChanged.disconnect()
        self.autogen_button.clicked.disconnect()
        self.copy_a_to_as_action.triggered.disconnect()
        self.copy_as_to_a_action.triggered.disconnect()
        try:
            self.authors_edit.editTextChanged.disconnect()
        except:
            pass
        try:
            self.textChanged.disconnect()
        except:
            pass
        try:
            self.autogen_button.clicked.disconnect()
        except:
            pass
        try:
            self.copy_a_to_as_action.triggered.disconnect()
        except:
            pass
        try:
            self.copy_as_to_a_action.triggered.disconnect()
        except:
            pass
        self.authors_edit = None

    # }}}

@ -519,9 +548,18 @@ class SeriesIndexEdit(QDoubleSpinBox):
            traceback.print_exc()

    def break_cycles(self):
        self.series_edit.currentIndexChanged.disconnect()
        self.series_edit.editTextChanged.disconnect()
        self.series_edit.lineEdit().editingFinished.disconnect()
        try:
            self.series_edit.currentIndexChanged.disconnect()
        except:
            pass
        try:
            self.series_edit.editTextChanged.disconnect()
        except:
            pass
        try:
            self.series_edit.lineEdit().editingFinished.disconnect()
        except:
            pass
        self.db = self.series_edit = self.dialog = None

    # }}}

@ -898,7 +936,10 @@ class Cover(ImageView): # {{{
        return True

    def break_cycles(self):
        self.cover_changed.disconnect()
        try:
            self.cover_changed.disconnect()
        except:
            pass
        self.dialog = self._cdata = self.current_val = self.original_val = None

    # }}}
@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import urllib
from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog

class ArchiveOrgStore(BasicStoreConfig, StorePlugin):

    def open(self, parent=None, detail_item=None, external=False):
        url = 'http://www.archive.org/details/texts'

        if detail_item:
            detail_item = url_slash_cleaner('http://www.archive.org' + detail_item)

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
        else:
            d = WebStoreDialog(self.gui, url, parent, detail_item)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        query = query + ' AND mediatype:texts'
        url = 'http://www.archive.org/search.php?query=' + urllib.quote(query)

        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            for data in doc.xpath('//td[@class="hitCell"]'):
                if counter <= 0:
                    break

                id = ''.join(data.xpath('.//a[@class="titleLink"]/@href'))
                if not id:
                    continue

                title = ''.join(data.xpath('.//a[@class="titleLink"]//text()'))
                authors = data.xpath('.//text()')
                if not authors:
                    continue
                author = None
                for a in authors:
                    if '-' in a:
                        author = a.replace('-', ' ').strip()
                        if author:
                            break
                if not author:
                    continue

                counter -= 1

                s = SearchResult()
                s.title = title.strip()
                s.author = author.strip()
                s.price = '$0.00'
                s.detail_item = id.strip()
                s.drm = SearchResult.DRM_UNLOCKED

                yield s

    def get_details(self, search_result, timeout):
        url = url_slash_cleaner('http://www.archive.org' + search_result.detail_item)

        br = browser()
        with closing(br.open(url, timeout=timeout)) as nf:
            idata = html.fromstring(nf.read())
            formats = ', '.join(idata.xpath('//p[@id="dl" and @class="content"]//a/text()'))
            search_result.formats = formats.upper()

        return True
@ -133,7 +133,7 @@ class Matches(QAbstractItemModel):
                return QVariant('<p>%s</p>' % result.description)
            elif col == 2:
                if result.drm_free_only:
                    return QVariant('<p>' + _('This store only distributes ebooks with DRM.') + '</p>')
                    return QVariant('<p>' + _('This store only distributes ebooks without DRM.') + '</p>')
                else:
                    return QVariant('<p>' + _('This store distributes ebooks with DRM. It may have some titles without DRM, but you will need to check on a per title basis.') + '</p>')
            elif col == 3:
@ -1,9 +1,6 @@
This is a list of stores that objected, declined
or asked not to be included in the store integration.

* Borders (http://www.borders.com/)
* Indigo (http://www.chapters.indigo.ca/)
* Borders (http://www.borders.com/).
* Indigo (http://www.chapters.indigo.ca/).
* Libraria Rizzoli (http://libreriarizzoli.corriere.it/).
  No reply after two attempts over 2 weeks.
* WH Smith (http://www.whsmith.co.uk/)
  Refused to permit signing up for the affiliate program
@ -1,106 +0,0 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import urllib2
from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog

class FeedbooksStore(BasicStoreConfig, StorePlugin):

    def open(self, parent=None, detail_item=None, external=False):
        url = 'http://m.feedbooks.com/'
        ext_url = 'http://feedbooks.com/'

        if external or self.config.get('open_external', False):
            if detail_item:
                ext_url = ext_url + detail_item
            open_url(QUrl(url_slash_cleaner(ext_url)))
        else:
            detail_url = None
            if detail_item:
                detail_url = url + detail_item
            d = WebStoreDialog(self.gui, url, parent, detail_url)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        url = 'http://m.feedbooks.com/search?query=' + urllib2.quote(query)

        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            for data in doc.xpath('//ul[@class="m-list"]//li'):
                if counter <= 0:
                    break
                data = html.fromstring(html.tostring(data))

                id = ''
                id_a = data.xpath('//a[@class="buy"]')
                if id_a:
                    id = id_a[0].get('href', None)
                    id = id.split('/')[-2]
                    id = '/item/' + id
                else:
                    id_a = data.xpath('//a[@class="download"]')
                    if id_a:
                        id = id_a[0].get('href', None)
                        id = id.split('/')[-1]
                        id = id.split('.')[0]
                        id = '/book/' + id
                if not id:
                    continue

                title = ''.join(data.xpath('//h5//a/text()'))
                author = ''.join(data.xpath('//h6//a/text()'))
                price = ''.join(data.xpath('//a[@class="buy"]/text()'))
                formats = 'EPUB'
                if not price:
                    price = '$0.00'
                    formats = 'EPUB, MOBI, PDF'
                cover_url = ''
                cover_url_img = data.xpath('//img')
                if cover_url_img:
                    cover_url = cover_url_img[0].get('src')
                    cover_url.split('?')[0]

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url
                s.title = title.strip()
                s.author = author.strip()
                s.price = price.replace(' ', '').strip()
                s.detail_item = id.strip()
                s.formats = formats

                yield s

    def get_details(self, search_result, timeout):
        url = 'http://m.feedbooks.com/'

        br = browser()
        with closing(br.open(url_slash_cleaner(url + search_result.detail_item), timeout=timeout)) as nf:
            idata = html.fromstring(nf.read())
            if idata.xpath('boolean(//div[contains(@class, "m-description-long")]//p[contains(., "DRM") or contains(b, "Protection")])'):
                search_result.drm = SearchResult.DRM_LOCKED
            else:
                search_result.drm = SearchResult.DRM_UNLOCKED
        return True
75 src/calibre/gui2/store/opensearch_store.py Normal file
@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import mimetypes
import urllib

from PyQt4.Qt import QUrl

from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
from calibre.utils.opensearch import Client

class OpenSearchStore(StorePlugin):

    open_search_url = ''
    web_url = ''
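    # Concrete stores subclass OpenSearchStore and override these two URLs;
    # see the archive.org, Epub Bud, Feedbooks and Pragmatic Bookshelf
    # plugins added later in this commit.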

    def open(self, parent=None, detail_item=None, external=False):
        if not hasattr(self, 'web_url'):
            return

        if external or self.config.get('open_external', False):
            open_url(QUrl(detail_item if detail_item else self.web_url))
        else:
            d = WebStoreDialog(self.gui, self.web_url, parent, detail_item)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        if not hasattr(self, 'open_search_url'):
            return

        client = Client(self.open_search_url)
        results = client.search(urllib.quote_plus(query), max_results)

        counter = max_results
        for r in results:
            if counter <= 0:
                break
            counter -= 1

            s = SearchResult()

            s.detail_item = r.get('id', '')

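            # Map OPDS link relations onto the result: thumbnails become the
            # cover, a buy link becomes the detail item, and direct
            # acquisition links are collected as downloads keyed by format.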
            links = r.get('links', None)
            for l in links:
                if l.get('rel', None):
                    if l['rel'] in ('http://opds-spec.org/thumbnail', 'http://opds-spec.org/image/thumbnail'):
                        s.cover_url = l.get('href', '')
                    elif l['rel'] == u'http://opds-spec.org/acquisition/buy':
                        s.detail_item = l.get('href', s.detail_item)
                    elif l['rel'] == u'http://opds-spec.org/acquisition':
                        mime = l.get('type', '')
                        if mime:
                            ext = mimetypes.guess_extension(mime)
                            if ext:
                                ext = ext[1:].upper()
                                s.downloads[ext] = l.get('href', '')

            s.formats = ', '.join(s.downloads.keys())

            s.title = r.get('title', '')
            s.author = r.get('author', '')
            s.price = r.get('price', '')

            yield s
@ -1,84 +0,0 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import urllib
from contextlib import closing

from lxml import html

from PyQt4.Qt import QUrl

from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog

class PragmaticBookshelfStore(BasicStoreConfig, StorePlugin):

    def open(self, parent=None, detail_item=None, external=False):
        url = 'http://pragprog.com/'

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
        else:
            d = WebStoreDialog(self.gui, url, parent, detail_item)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        '''
        OPDS based search.

        We really should get the catalog from http://pragprog.com/catalog.opds
        and look for the application/opensearchdescription+xml entry.
        Then get the opensearch description to get the search url and
        format. However, we are going to be lazy and hard code it.
        '''
        url = 'http://pragprog.com/catalog/search?q=' + urllib.quote_plus(query)

        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            # Use html instead of etree as html allows us
            # to ignore the namespace easily.
            doc = html.fromstring(f.read())
            for data in doc.xpath('//entry'):
                if counter <= 0:
                    break

                id = ''.join(data.xpath('.//link[@rel="http://opds-spec.org/acquisition/buy"]/@href'))
                if not id:
                    continue

                price = ''.join(data.xpath('.//price/@currencycode')).strip()
                price += ' '
                price += ''.join(data.xpath('.//price/text()')).strip()
                if not price.strip():
                    continue

                cover_url = ''.join(data.xpath('.//link[@rel="http://opds-spec.org/cover"]/@href'))

                title = ''.join(data.xpath('.//title/text()'))
                author = ''.join(data.xpath('.//author//text()'))

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url
                s.title = title.strip()
                s.author = author.strip()
                s.price = price.strip()
                s.detail_item = id.strip()
                s.drm = SearchResult.DRM_UNLOCKED
                s.formats = 'EPUB, PDF, MOBI'

                yield s
@ -45,6 +45,7 @@ class AdvSearchBuilderDialog(QDialog, Ui_Dialog):
        self.author_box.setText('')
        self.price_box.setText('')
        self.format_box.setText('')
        self.download_combo.setCurrentIndex(0)
        self.affiliate_combo.setCurrentIndex(0)

    def tokens(self, raw):

@ -119,6 +120,9 @@ class AdvSearchBuilderDialog(QDialog, Ui_Dialog):
        format = unicode(self.format_box.text()).strip()
        if format:
            ans.append('format:"' + self.mc + format + '"')
        download = unicode(self.download_combo.currentText()).strip()
        if download:
            ans.append('download:' + download)
        affiliate = unicode(self.affiliate_combo.currentText()).strip()
        if affiliate:
            ans.append('affiliate:' + affiliate)
@ -226,7 +226,7 @@
    </property>
   </widget>
  </item>
  <item row="7" column="0" colspan="2">
  <item row="8" column="0" colspan="2">
   <layout class="QHBoxLayout" name="horizontalLayout_6">
    <item>
     <widget class="QPushButton" name="clear_button">

@ -244,7 +244,7 @@
    </item>
   </layout>
  </item>
  <item row="6" column="1">
  <item row="7" column="1">
   <spacer name="verticalSpacer">
    <property name="orientation">
     <enum>Qt::Vertical</enum>

@ -283,14 +283,14 @@
  <item row="3" column="1">
   <widget class="EnLineEdit" name="price_box"/>
  </item>
  <item row="5" column="0">
  <item row="6" column="0">
   <widget class="QLabel" name="label_9">
    <property name="text">
     <string>Affiliate:</string>
    </property>
   </widget>
  </item>
  <item row="5" column="1">
  <item row="6" column="1">
   <widget class="QComboBox" name="affiliate_combo">
    <item>
     <property name="text">

@ -309,6 +309,32 @@
    </item>
   </widget>
  </item>
  <item row="5" column="0">
   <widget class="QLabel" name="label_12">
    <property name="text">
     <string>Download:</string>
    </property>
   </widget>
  </item>
  <item row="5" column="1">
   <widget class="QComboBox" name="download_combo">
    <item>
     <property name="text">
      <string/>
     </property>
    </item>
    <item>
     <property name="text">
      <string>true</string>
     </property>
    </item>
    <item>
     <property name="text">
      <string>false</string>
     </property>
    </item>
   </widget>
  </item>
 </layout>
</widget>
</widget>
@ -33,7 +33,7 @@ class Matches(QAbstractItemModel):

    total_changed = pyqtSignal(int)

    HEADERS = [_('Cover'), _('Title'), _('Price'), _('DRM'), _('Store'), '']
    HEADERS = [_('Cover'), _('Title'), _('Price'), _('DRM'), _('Store'), _('Download'), _('Affiliate')]
    HTML_COLS = (1, 4)

    def __init__(self, cover_thread_count=2, detail_thread_count=4):

@ -47,6 +47,8 @@ class Matches(QAbstractItemModel):
            Qt.SmoothTransformation)
        self.DONATE_ICON = QPixmap(I('donate.png')).scaledToHeight(16,
            Qt.SmoothTransformation)
        self.DOWNLOAD_ICON = QPixmap(I('arrow-down.png')).scaledToHeight(16,
            Qt.SmoothTransformation)

        # All matches. Used to determine the order to display
        # self.matches because the SearchFilter returns

@ -181,9 +183,11 @@ class Matches(QAbstractItemModel):
            elif result.drm == SearchResult.DRM_UNKNOWN:
                return QVariant(self.DRM_UNKNOWN_ICON)
            if col == 5:
                if result.downloads:
                    return QVariant(self.DOWNLOAD_ICON)
            if col == 6:
                if result.affiliate:
                    return QVariant(self.DONATE_ICON)
            return NONE
        elif role == Qt.ToolTipRole:
            if col == 1:
                return QVariant('<p>%s</p>' % result.title)

@ -199,6 +203,9 @@ class Matches(QAbstractItemModel):
            elif col == 4:
                return QVariant('<p>%s</p>' % result.formats)
            elif col == 5:
                if result.downloads:
                    return QVariant('<p>' + _('The following formats can be downloaded directly: %s.') % ', '.join(result.downloads.keys()) + '</p>')
            elif col == 6:
                if result.affiliate:
                    return QVariant('<p>' + _('Buying from this store supports the calibre developer: %s.') % result.plugin_author + '</p>')
        elif role == Qt.SizeHintRole:

@ -221,6 +228,11 @@ class Matches(QAbstractItemModel):
            elif col == 4:
                text = result.store_name
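            # The download/affiliate columns sort on presence alone: rows with
            # downloads (or an affiliate link) get key 'a', the rest 'b', so
            # flagged rows group together when sorted.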
            elif col == 5:
                if result.downloads:
                    text = 'a'
                else:
                    text = 'b'
            elif col == 6:
                if result.affiliate:
                    text = 'a'
                else:

@ -257,6 +269,8 @@ class SearchFilter(SearchQueryParser):
        'author',
        'authors',
        'cover',
        'download',
        'downloads',
        'drm',
        'format',
        'formats',

@ -282,6 +296,8 @@ class SearchFilter(SearchQueryParser):
        location = location.lower().strip()
        if location == 'authors':
            location = 'author'
        elif location == 'downloads':
            location = 'download'
        elif location == 'formats':
            location = 'format'

@ -308,12 +324,13 @@ class SearchFilter(SearchQueryParser):
            'author': lambda x: x.author.lower(),
            'cover': attrgetter('cover_url'),
            'drm': attrgetter('drm'),
            'download': attrgetter('downloads'),
            'format': attrgetter('formats'),
            'price': lambda x: comparable_price(x.price),
            'store': lambda x: x.store_name.lower(),
            'title': lambda x: x.title.lower(),
        }
        for x in ('author', 'format'):
        for x in ('author', 'download', 'format'):
            q[x+'s'] = q[x]
        for sr in self.srs:
            for locvalue in locations:

@ -347,7 +364,7 @@ class SearchFilter(SearchQueryParser):
                        matches.add(sr)
                    continue
                # this is bool or treated as bool, so can't match below.
                if locvalue in ('affiliate', 'drm'):
                if locvalue in ('affiliate', 'drm', 'download', 'downloads'):
                    continue
                try:
                    ### Can't separate authors because comma is used for name sep and author sep
@ -6,13 +6,18 @@ __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from PyQt4.Qt import (QTreeView)
from functools import partial

from PyQt4.Qt import (pyqtSignal, QMenu, QTreeView)

from calibre.gui2.metadata.single_download import RichTextDelegate
from calibre.gui2.store.search.models import Matches

class ResultsView(QTreeView):

    download_requested = pyqtSignal(object)
    open_requested = pyqtSignal(object)

    def __init__(self, *args):
        QTreeView.__init__(self,*args)

@ -24,3 +29,18 @@ class ResultsView(QTreeView):
        for i in self._model.HTML_COLS:
            self.setItemDelegateForColumn(i, self.rt_delegate)

    def contextMenuEvent(self, event):
        index = self.indexAt(event.pos())

        if not index.isValid():
            return

        result = self.model().get_result(index)

        menu = QMenu()
        da = menu.addAction(_('Download...'), partial(self.download_requested.emit, result))
        if not result.downloads:
            da.setEnabled(False)
        menu.addSeparator()
        menu.addAction(_('Goto in store...'), partial(self.open_requested.emit, result))
        menu.exec_(event.globalPos())
@ -14,6 +14,7 @@ from PyQt4.Qt import (Qt, QDialog, QDialogButtonBox, QTimer, QCheckBox, QLabel,
                      QComboBox)

from calibre.gui2 import JSONConfig, info_dialog
from calibre.gui2.dialogs.choose_format import ChooseFormatDialog
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.store.config.chooser.chooser_widget import StoreChooserWidget
from calibre.gui2.store.config.search.search_widget import StoreConfigWidget

@ -72,7 +73,9 @@ class SearchDialog(QDialog, Ui_Dialog):
        self.search.clicked.connect(self.do_search)
        self.checker.timeout.connect(self.get_results)
        self.progress_checker.timeout.connect(self.check_progress)
        self.results_view.activated.connect(self.open_store)
        self.results_view.activated.connect(self.result_item_activated)
        self.results_view.download_requested.connect(self.download_book)
        self.results_view.open_requested.connect(self.open_store)
        self.results_view.model().total_changed.connect(self.update_book_total)
        self.select_all_stores.clicked.connect(self.stores_select_all)
        self.select_invert_stores.clicked.connect(self.stores_select_invert)

@ -129,11 +132,15 @@ class SearchDialog(QDialog, Ui_Dialog):
        # Title / Author
        self.results_view.setColumnWidth(1,int(total*.40))
        # Price
        self.results_view.setColumnWidth(2,int(total*.20))
        self.results_view.setColumnWidth(2,int(total*.12))
        # DRM
        self.results_view.setColumnWidth(3, int(total*.15))
        # Store / Formats
        self.results_view.setColumnWidth(4, int(total*.25))
        # Download
        self.results_view.setColumnWidth(5, 20)
        # Affiliate
        self.results_view.setColumnWidth(6, 20)

    def do_search(self):
        # Stop all running threads.

@ -183,7 +190,7 @@ class SearchDialog(QDialog, Ui_Dialog):
            query = re.sub(r'%s:"(?P<a>[^\s"]+)"' % loc, '\g<a>', query)
            query = query.replace('%s:' % loc, '')
        # Remove the prefix and search text.
        for loc in ('cover', 'drm', 'format', 'formats', 'price', 'store'):
        for loc in ('cover', 'download', 'downloads', 'drm', 'format', 'formats', 'price', 'store'):
            query = re.sub(r'%s:"[^"]"' % loc, '', query)
            query = re.sub(r'%s:[^\s]*' % loc, '', query)
        # Remove logic.

@ -330,8 +337,21 @@ class SearchDialog(QDialog, Ui_Dialog):
    def update_book_total(self, total):
        self.total.setText('%s' % total)

    def open_store(self, index):
    def result_item_activated(self, index):
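        # Activating a result prefers a direct download when the store
        # provides one, and falls back to opening the store page otherwise.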
        result = self.results_view.model().get_result(index)

        if result.downloads:
            self.download_book(result)
        else:
            self.open_store(result)

    def download_book(self, result):
        d = ChooseFormatDialog(self, _('Choose format to download to your library.'), result.downloads.keys())
        if d.exec_() == d.Accepted:
            ext = d.format()
            self.gui.download_ebook(result.downloads[ext])

    def open_store(self, result):
        self.gui.istores[result.store_name].open(self, result.detail_item, self.open_external.isChecked())

    def check_progress(self):
@ -22,6 +22,9 @@ class SearchResult(object):
        self.detail_item = ''
        self.drm = None
        self.formats = ''
        # key = format in upper case.
        # value = url to download the file.
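        # e.g. {'EPUB': 'http://example.com/book.epub'} (hypothetical URL,
        # for illustration only).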
        self.downloads = {}
        self.affiliate = False
        self.plugin_author = ''

3 src/calibre/gui2/store/stores/__init__.py Normal file
@ -0,0 +1,3 @@
'''
All store plugins are placed here.
'''
36 src/calibre/gui2/store/stores/archive_org_plugin.py Normal file
@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'


from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.opensearch_store import OpenSearchStore
from calibre.gui2.store.search_result import SearchResult

class ArchiveOrgStore(BasicStoreConfig, OpenSearchStore):

    open_search_url = 'http://bookserver.archive.org/catalog/opensearch.xml'
    web_url = 'http://www.archive.org/details/texts'

    # http://bookserver.archive.org/catalog/

    def search(self, query, max_results=10, timeout=60):
        for s in OpenSearchStore.search(self, query, max_results, timeout):
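            # The OPDS entry id ends with the item identifier after the final
            # colon; rewrite it into a browsable details URL.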
            s.detail_item = 'http://www.archive.org/details/' + s.detail_item.split(':')[-1]
            s.price = '$0.00'
            s.drm = SearchResult.DRM_UNLOCKED
            yield s

    '''
    def get_details(self, search_result, timeout):
        br = browser()
        with closing(br.open(search_result.detail_item, timeout=timeout)) as nf:
            idata = html.fromstring(nf.read())
            formats = ', '.join(idata.xpath('//p[@id="dl" and @class="content"]//a/text()'))
            search_result.formats = formats.upper()

        return True
    '''
27 src/calibre/gui2/store/stores/epubbud_plugin.py Normal file
@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.opensearch_store import OpenSearchStore
from calibre.gui2.store.search_result import SearchResult

class EpubBudStore(BasicStoreConfig, OpenSearchStore):

    open_search_url = 'http://www.epubbud.com/feeds/opensearch.xml'
    web_url = 'http://www.epubbud.com/'

    # http://www.epubbud.com/feeds/catalog.atom

    def search(self, query, max_results=10, timeout=60):
        for s in OpenSearchStore.search(self, query, max_results, timeout):
            s.price = '$0.00'
            s.drm = SearchResult.DRM_UNLOCKED
            s.formats = 'EPUB'
            # Download links are broken for this store.
            s.downloads = {}
            yield s
28 src/calibre/gui2/store/stores/feedbooks_plugin.py Normal file
@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.opensearch_store import OpenSearchStore
from calibre.gui2.store.search_result import SearchResult

class FeedbooksStore(BasicStoreConfig, OpenSearchStore):

    open_search_url = 'http://assets0.feedbooks.net/opensearch.xml?t=1253087147'
    web_url = 'http://feedbooks.com/'

    # http://www.feedbooks.com/catalog

    def search(self, query, max_results=10, timeout=60):
        for s in OpenSearchStore.search(self, query, max_results, timeout):
            if s.downloads:
                s.drm = SearchResult.DRM_UNLOCKED
                s.price = '$0.00'
            else:
                s.drm = SearchResult.DRM_LOCKED
            s.formats = 'EPUB'
            yield s
@ -20,7 +20,7 @@ from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog


class ManyBooksStore(BasicStoreConfig, StorePlugin):

    def open(self, parent=None, detail_item=None, external=False):

@ -29,7 +29,7 @@ class ManyBooksStore(BasicStoreConfig, StorePlugin):
        detail_url = None
        if detail_item:
            detail_url = url + detail_item

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
        else:

@ -44,16 +44,16 @@ class ManyBooksStore(BasicStoreConfig, StorePlugin):
        # secondary titles. Google is also faster.
        # Using a google search so we can search on both fields at once.
        url = 'http://www.google.com/xhtml?q=site:manybooks.net+' + urllib.quote_plus(query)

        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            for data in doc.xpath('//div[@class="edewpi"]//div[@class="r ld"]'):
                if counter <= 0:
                    break

                url = ''
                url_a = data.xpath('div[@class="jd"]/a')
                if url_a:

@ -65,13 +65,13 @@ class ManyBooksStore(BasicStoreConfig, StorePlugin):
                    continue
                id = url.split('/')[-1]
                id = id.strip()

                url_a = html.fromstring(html.tostring(url_a))
                heading = ''.join(url_a.xpath('//text()'))
                title, _, author = heading.rpartition('by ')
                author = author.split('-')[0]
                price = '$0.00'

                cover_url = ''
                mo = re.match('^\D+', id)
                if mo:

@ -79,10 +79,9 @@ class ManyBooksStore(BasicStoreConfig, StorePlugin):
                    cover_name = cover_name.replace('etext', '')
                    cover_id = id.split('.')[0]
                    cover_url = 'http://www.manybooks.net/images/' + id[0] + '/' + cover_name + '/' + cover_id + '-thumb.jpg'
                    print(cover_url)

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url
                s.title = title.strip()

@ -91,5 +90,5 @@ class ManyBooksStore(BasicStoreConfig, StorePlugin):
                s.detail_item = '/titles/' + id
                s.drm = SearchResult.DRM_UNLOCKED
                s.formats = 'EPUB, PDB (eReader, PalmDoc, zTXT, Plucker, iSilo), FB2, ZIP, AZW, MOBI, PRC, LIT, PKG, PDF, TXT, RB, RTF, LRF, TCR, JAR'

                yield s
@ -10,7 +10,7 @@ import re

from PyQt4.Qt import (QDialog, QDialogButtonBox)

from calibre.gui2.store.mobileread.adv_search_builder_ui import Ui_Dialog
from calibre.gui2.store.stores.mobileread.adv_search_builder_ui import Ui_Dialog
from calibre.library.caches import CONTAINS_MATCH, EQUALS_MATCH

class AdvSearchBuilderDialog(QDialog, Ui_Dialog):
@ -8,7 +8,7 @@ __docformat__ = 'restructuredtext en'

from PyQt4.Qt import QDialog

from calibre.gui2.store.mobileread.cache_progress_dialog_ui import Ui_Dialog
from calibre.gui2.store.stores.mobileread.cache_progress_dialog_ui import Ui_Dialog

class CacheProgressDialog(QDialog, Ui_Dialog):
@ -15,10 +15,10 @@ from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
from calibre.gui2.store.mobileread.models import SearchFilter
from calibre.gui2.store.mobileread.cache_progress_dialog import CacheProgressDialog
from calibre.gui2.store.mobileread.cache_update_thread import CacheUpdateThread
from calibre.gui2.store.mobileread.store_dialog import MobileReadStoreDialog
from calibre.gui2.store.stores.mobileread.models import SearchFilter
from calibre.gui2.store.stores.mobileread.cache_progress_dialog import CacheProgressDialog
from calibre.gui2.store.stores.mobileread.cache_update_thread import CacheUpdateThread
from calibre.gui2.store.stores.mobileread.store_dialog import MobileReadStoreDialog

class MobileReadStore(BasicStoreConfig, StorePlugin):
@ -9,9 +9,9 @@ __docformat__ = 'restructuredtext en'

from PyQt4.Qt import (Qt, QDialog, QIcon, QComboBox)

from calibre.gui2.store.mobileread.adv_search_builder import AdvSearchBuilderDialog
from calibre.gui2.store.mobileread.models import BooksModel
from calibre.gui2.store.mobileread.store_dialog_ui import Ui_Dialog
from calibre.gui2.store.stores.mobileread.adv_search_builder import AdvSearchBuilderDialog
from calibre.gui2.store.stores.mobileread.models import BooksModel
from calibre.gui2.store.stores.mobileread.store_dialog_ui import Ui_Dialog

class MobileReadStoreDialog(QDialog, Ui_Dialog):
@ -20,59 +20,56 @@ from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog

class EpubBudStore(BasicStoreConfig, StorePlugin):
class OpenBooksStore(BasicStoreConfig, StorePlugin):

    def open(self, parent=None, detail_item=None, external=False):
        url = 'http://epubbud.com/'
        url = 'http://drmfree.calibre-ebook.com/'

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
        else:
            d = WebStoreDialog(self.gui, url, parent, detail_item)
            d = WebStoreDialog(self.gui, self.url, parent, detail_item)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        '''
        OPDS based search.

        We really should get the catalog from http://pragprog.com/catalog.opds
        and look for the application/opensearchdescription+xml entry.
        Then get the opensearch description to get the search url and
        format. However, we are going to be lazy and hard code it.
        '''
        url = 'http://www.epubbud.com/search.php?format=atom&q=' + urllib.quote_plus(query)

        url = 'http://drmfree.calibre-ebook.com/search/?q=' + urllib.quote_plus(query)

        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            # Use html instead of etree as html allows us
            # to ignore the namespace easily.
            doc = html.fromstring(f.read())
            for data in doc.xpath('//entry'):
            for data in doc.xpath('//ul[@id="object_list"]//li'):
                if counter <= 0:
                    break

                id = ''.join(data.xpath('.//id/text()'))

                id = ''.join(data.xpath('.//div[@class="links"]/a[1]/@href'))
                id = id.strip()
                if not id:
                    continue

                cover_url = ''.join(data.xpath('.//link[@rel="http://opds-spec.org/thumbnail"]/@href'))

                title = u''.join(data.xpath('.//title/text()'))
                author = u''.join(data.xpath('.//author/name/text()'))
                cover_url = ''.join(data.xpath('.//div[@class="cover"]/img/@src'))

                price = ''.join(data.xpath('.//div[@class="price"]/text()'))
                a, b, price = price.partition('Price:')
                price = price.strip()
                if not price:
                    continue

                title = ''.join(data.xpath('.//div/strong/text()'))
                author = ''.join(data.xpath('.//div[@class="author"]//text()'))
                author = author.partition('by')[-1]

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url
                s.title = title.strip()
                s.author = author.strip()
                s.price = '$0.00'
                s.price = price.strip()
                s.detail_item = id.strip()
                s.drm = SearchResult.DRM_UNLOCKED
                s.formats = 'EPUB'

                yield s
src/calibre/gui2/store/stores/pragmatic_bookshelf_plugin.py (new file, 24 lines)
@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-

from __future__ import (unicode_literals, division, absolute_import, print_function)

__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.opensearch_store import OpenSearchStore
from calibre.gui2.store.search_result import SearchResult

class PragmaticBookshelfStore(BasicStoreConfig, OpenSearchStore):

    open_search_url = 'http://pragprog.com/catalog/search-description'
    web_url = 'http://pragprog.com/'

    # http://pragprog.com/catalog.opds

    def search(self, query, max_results=10, timeout=60):
        for s in OpenSearchStore.search(self, query, max_results, timeout):
            s.drm = SearchResult.DRM_UNLOCKED
            s.formats = 'EPUB, PDF, MOBI'
            yield s
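
A hypothetical usage sketch (not part of the commit): it assumes StorePlugin's (gui, name) constructor and network access, and only illustrates how the OpenSearch-backed search above would be exercised.

    # Hypothetical: the constructor arguments and query are illustrative only
    store = PragmaticBookshelfStore(None, 'Pragmatic Bookshelf')
    for result in store.search('erlang', max_results=3):
        print result.title, result.price, result.formats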
src/calibre/gui2/tag_browser/__init__.py (new file, 11 lines)
@ -0,0 +1,11 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
src/calibre/gui2/tag_browser/model.py (new file, 1294 lines)
File diff suppressed because it is too large
src/calibre/gui2/tag_browser/ui.py (new file, 458 lines)
@ -0,0 +1,458 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from functools import partial

from PyQt4.Qt import (Qt, QIcon, QWidget, QHBoxLayout, QVBoxLayout, QShortcut,
    QKeySequence, QToolButton, QString, QLabel, QFrame, QTimer, QComboBox,
    QMenu, QPushButton)

from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.widgets import HistoryLineEdit
from calibre.library.field_metadata import category_icon_map
from calibre.utils.icu import sort_key
from calibre.gui2.tag_browser.view import TagsView
from calibre.ebooks.metadata import title_sort
from calibre.gui2.dialogs.tag_categories import TagCategories
from calibre.gui2.dialogs.tag_list_editor import TagListEditor
from calibre.gui2.dialogs.edit_authors_dialog import EditAuthorsDialog

class TagBrowserMixin(object): # {{{

    def __init__(self, db):
        self.library_view.model().count_changed_signal.connect(self.tags_view.recount)
        self.tags_view.set_database(db, self.tag_match, self.sort_by)
        self.tags_view.tags_marked.connect(self.search.set_search_string)
        self.tags_view.tag_list_edit.connect(self.do_tags_list_edit)
        self.tags_view.edit_user_category.connect(self.do_edit_user_categories)
        self.tags_view.delete_user_category.connect(self.do_delete_user_category)
        self.tags_view.del_item_from_user_cat.connect(self.do_del_item_from_user_cat)
        self.tags_view.add_subcategory.connect(self.do_add_subcategory)
        self.tags_view.add_item_to_user_cat.connect(self.do_add_item_to_user_cat)
        self.tags_view.saved_search_edit.connect(self.do_saved_search_edit)
        self.tags_view.rebuild_saved_searches.connect(self.do_rebuild_saved_searches)
        self.tags_view.author_sort_edit.connect(self.do_author_sort_edit)
        self.tags_view.tag_item_renamed.connect(self.do_tag_item_renamed)
        self.tags_view.search_item_renamed.connect(self.saved_searches_changed)
        self.tags_view.drag_drop_finished.connect(self.drag_drop_finished)
        self.tags_view.restriction_error.connect(self.do_restriction_error,
                type=Qt.QueuedConnection)

        for text, func, args, cat_name in (
                (_('Manage Authors'),
                    self.do_author_sort_edit, (self, None), 'authors'),
                (_('Manage Series'),
                    self.do_tags_list_edit, (None, 'series'), 'series'),
                (_('Manage Publishers'),
                    self.do_tags_list_edit, (None, 'publisher'), 'publisher'),
                (_('Manage Tags'),
                    self.do_tags_list_edit, (None, 'tags'), 'tags'),
                (_('Manage User Categories'),
                    self.do_edit_user_categories, (None,), 'user:'),
                (_('Manage Saved Searches'),
                    self.do_saved_search_edit, (None,), 'search')
            ):
            self.manage_items_button.menu().addAction(
                    QIcon(I(category_icon_map[cat_name])),
                    text, partial(func, *args))

    def do_restriction_error(self):
        error_dialog(self.tags_view, _('Invalid search restriction'),
                _('The current search restriction is invalid'), show=True)

    def do_add_subcategory(self, on_category_key, new_category_name=None):
        '''
        Add a subcategory to the category 'on_category'. If new_category_name is
        None, then a default name is shown and the user is offered the
        opportunity to edit the name.
        '''
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})

        # Ensure that the temporary name we will use is not already there
        i = 0
        if new_category_name is not None:
            new_name = new_category_name.replace('.', '')
        else:
            new_name = _('New Category').replace('.', '')
        n = new_name
        while True:
            new_cat = on_category_key[1:] + '.' + n
            if new_cat not in user_cats:
                break
            i += 1
            n = new_name + unicode(i)
        # Add the new category
        user_cats[new_cat] = []
        db.prefs.set('user_categories', user_cats)
        self.tags_view.set_new_model()
        m = self.tags_view.model()
        idx = m.index_for_path(m.find_category_node('@' + new_cat))
        m.show_item_at_index(idx)
        # Open the editor on the new item to rename it
        if new_category_name is None:
            self.tags_view.edit(idx)

    def do_edit_user_categories(self, on_category=None):
        '''
        Open the user categories editor.
        '''
        db = self.library_view.model().db
        d = TagCategories(self, db, on_category)
        if d.exec_() == d.Accepted:
            db.prefs.set('user_categories', d.categories)
            db.field_metadata.remove_user_categories()
            for k in d.categories:
                db.field_metadata.add_user_category('@' + k, k)
            db.data.change_search_locations(db.field_metadata.get_search_terms())
            self.tags_view.set_new_model()

    def do_delete_user_category(self, category_name):
        '''
        Delete the user category named category_name. Any leading '@' is removed
        '''
        if category_name.startswith('@'):
            category_name = category_name[1:]
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})
        cat_keys = sorted(user_cats.keys(), key=sort_key)
        has_children = False
        found = False
        for k in cat_keys:
            if k == category_name:
                found = True
                has_children = len(user_cats[k])
            elif k.startswith(category_name + '.'):
                has_children = True
        if not found:
            return error_dialog(self.tags_view, _('Delete user category'),
                    _('%s is not a user category')%category_name, show=True)
        if has_children:
            if not question_dialog(self.tags_view, _('Delete user category'),
                    _('%s contains items. Do you really '
                        'want to delete it?')%category_name):
                return
        for k in cat_keys:
            if k == category_name:
                del user_cats[k]
            elif k.startswith(category_name + '.'):
                del user_cats[k]
        db.prefs.set('user_categories', user_cats)
        self.tags_view.set_new_model()

    def do_del_item_from_user_cat(self, user_cat, item_name, item_category):
        '''
        Delete the item (item_name, item_category) from the user category with
        key user_cat. Any leading '@' characters are removed
        '''
        if user_cat.startswith('@'):
            user_cat = user_cat[1:]
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})
        if user_cat not in user_cats:
            error_dialog(self.tags_view, _('Remove category'),
                    _('User category %s does not exist')%user_cat,
                    show=True)
            return
        self.tags_view.model().delete_item_from_user_category(user_cat,
                item_name, item_category)
        self.tags_view.recount()

    def do_add_item_to_user_cat(self, dest_category, src_name, src_category):
        '''
        Add the item src_name in src_category to the user category
        dest_category. Any leading '@' is removed
        '''
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})

        if dest_category and dest_category.startswith('@'):
            dest_category = dest_category[1:]

        if dest_category not in user_cats:
            return error_dialog(self.tags_view, _('Add to user category'),
                    _('A user category %s does not exist')%dest_category, show=True)

        # Now add the item to the destination user category
        add_it = True
        if src_category == 'news':
            src_category = 'tags'
        for tup in user_cats[dest_category]:
            if src_name == tup[0] and src_category == tup[1]:
                add_it = False
        if add_it:
            user_cats[dest_category].append([src_name, src_category, 0])
        db.prefs.set('user_categories', user_cats)
        self.tags_view.recount()

    def do_tags_list_edit(self, tag, category):
        '''
        Open the 'manage_X' dialog where X == category. If tag is not None, the
        dialog will position the editor on that item.
        '''
        db = self.library_view.model().db
        if category == 'tags':
            result = db.get_tags_with_ids()
            key = sort_key
        elif category == 'series':
            result = db.get_series_with_ids()
            key = lambda x: sort_key(title_sort(x))
        elif category == 'publisher':
            result = db.get_publishers_with_ids()
            key = sort_key
        else: # should be a custom field
            cc_label = None
            if category in db.field_metadata:
                cc_label = db.field_metadata[category]['label']
                result = db.get_custom_items_with_ids(label=cc_label)
            else:
                result = []
            key = sort_key

        d = TagListEditor(self, tag_to_match=tag, data=result, key=key)
        d.exec_()
        if d.result() == d.Accepted:
            to_rename = d.to_rename # dict of new text to old id
            to_delete = d.to_delete # list of ids
            orig_name = d.original_names # dict of id: name

            rename_func = None
            if category == 'tags':
                rename_func = db.rename_tag
                delete_func = db.delete_tag_using_id
            elif category == 'series':
                rename_func = db.rename_series
                delete_func = db.delete_series_using_id
            elif category == 'publisher':
                rename_func = db.rename_publisher
                delete_func = db.delete_publisher_using_id
            else:
                rename_func = partial(db.rename_custom_item, label=cc_label)
                delete_func = partial(db.delete_custom_item_using_id, label=cc_label)
            m = self.tags_view.model()
            if rename_func:
                for item in to_delete:
                    delete_func(item)
                    m.delete_item_from_all_user_categories(orig_name[item], category)
                for old_id in to_rename:
                    rename_func(old_id, new_name=unicode(to_rename[old_id]))
                    m.rename_item_in_all_user_categories(orig_name[old_id],
                            category, unicode(to_rename[old_id]))

            # Clean up the library view
            self.do_tag_item_renamed()
            self.tags_view.recount()

    def do_tag_item_renamed(self):
        # Clean up library view and search
        # get information to redo the selection
        rows = [r.row() for r in \
                self.library_view.selectionModel().selectedRows()]
        m = self.library_view.model()
        ids = [m.id(r) for r in rows]

        m.refresh(reset=False)
        m.research()
        self.library_view.select_rows(ids)
        # refreshing the tags view happens at the emit()/call() site

    def do_author_sort_edit(self, parent, id, select_sort=True):
        '''
        Open the manage authors dialog
        '''
        db = self.library_view.model().db
        editor = EditAuthorsDialog(parent, db, id, select_sort)
        d = editor.exec_()
        if d:
            for (id, old_author, new_author, new_sort) in editor.result:
                if old_author != new_author:
                    # The id might change if the new author already exists
                    id = db.rename_author(id, new_author)
                db.set_sort_field_for_author(id, unicode(new_sort),
                        commit=False, notify=False)
            db.commit()
            self.library_view.model().refresh()
            self.tags_view.recount()

    def drag_drop_finished(self, ids):
        self.library_view.model().refresh_ids(ids)

# }}}

class TagBrowserWidget(QWidget): # {{{

    def __init__(self, parent):
        QWidget.__init__(self, parent)
        self.parent = parent
        self._layout = QVBoxLayout()
        self.setLayout(self._layout)
        self._layout.setContentsMargins(0,0,0,0)

        # Set up the find box & button
        search_layout = QHBoxLayout()
        self._layout.addLayout(search_layout)
        self.item_search = HistoryLineEdit(parent)
        try:
            self.item_search.lineEdit().setPlaceholderText(
                    _('Find item in tag browser'))
        except:
            pass # Using Qt < 4.7
        self.item_search.setToolTip(_(
            'Search for items. This is a "contains" search; items containing the\n'
            'text anywhere in the name will be found. You can limit the search\n'
            'to particular categories using syntax similar to search. For example,\n'
            'tags:foo will find foo in any tag, but not in authors etc. Entering\n'
            '*foo will filter all categories at once, showing only those items\n'
            'containing the text "foo"'))
        search_layout.addWidget(self.item_search)
        # Not sure if the shortcut should be translatable ...
        sc = QShortcut(QKeySequence(_('ALT+f')), parent)
        sc.activated.connect(self.set_focus_to_find_box)

        self.search_button = QToolButton()
        self.search_button.setText(_('F&ind'))
        self.search_button.setToolTip(_('Find the first/next matching item'))
        search_layout.addWidget(self.search_button)

        self.expand_button = QToolButton()
        self.expand_button.setText('-')
        self.expand_button.setToolTip(_('Collapse all categories'))
        search_layout.addWidget(self.expand_button)
        search_layout.setStretch(0, 10)
        search_layout.setStretch(1, 1)
        search_layout.setStretch(2, 1)

        self.current_find_position = None
        self.search_button.clicked.connect(self.find)
        self.item_search.initialize('tag_browser_search')
        self.item_search.lineEdit().returnPressed.connect(self.do_find)
        self.item_search.lineEdit().textEdited.connect(self.find_text_changed)
        self.item_search.activated[QString].connect(self.do_find)
        self.item_search.completer().setCaseSensitivity(Qt.CaseSensitive)

        parent.tags_view = TagsView(parent)
        self.tags_view = parent.tags_view
        self.expand_button.clicked.connect(self.tags_view.collapseAll)
        self._layout.addWidget(parent.tags_view)

        # Now the floating 'not found' box
        l = QLabel(self.tags_view)
        self.not_found_label = l
        l.setFrameStyle(QFrame.StyledPanel)
        l.setAutoFillBackground(True)
        l.setText('<p><b>'+_('No More Matches.</b><p> Click Find again to go to first match'))
        l.setAlignment(Qt.AlignVCenter)
        l.setWordWrap(True)
        l.resize(l.sizeHint())
        l.move(10,20)
        l.setVisible(False)
        self.not_found_label_timer = QTimer()
        self.not_found_label_timer.setSingleShot(True)
        self.not_found_label_timer.timeout.connect(self.not_found_label_timer_event,
                type=Qt.QueuedConnection)

        parent.sort_by = QComboBox(parent)
        # Must be in the same order as db2.CATEGORY_SORTS
        for x in (_('Sort by name'), _('Sort by popularity'),
                _('Sort by average rating')):
            parent.sort_by.addItem(x)
        parent.sort_by.setToolTip(
                _('Set the sort order for entries in the Tag Browser'))
        parent.sort_by.setStatusTip(parent.sort_by.toolTip())
        parent.sort_by.setCurrentIndex(0)
        self._layout.addWidget(parent.sort_by)

        # Must be in the same order as db2.MATCH_TYPE
        parent.tag_match = QComboBox(parent)
        for x in (_('Match any'), _('Match all')):
            parent.tag_match.addItem(x)
        parent.tag_match.setCurrentIndex(0)
        self._layout.addWidget(parent.tag_match)
        parent.tag_match.setToolTip(
                _('When selecting multiple entries in the Tag Browser '
                    'match any or all of them'))
        parent.tag_match.setStatusTip(parent.tag_match.toolTip())

        l = parent.manage_items_button = QPushButton(self)
        l.setStyleSheet('QPushButton {text-align: left; }')
        l.setText(_('Manage authors, tags, etc'))
        l.setToolTip(_('All of these category_managers are available by right-clicking '
                       'on items in the tag browser above'))
        l.m = QMenu()
        l.setMenu(l.m)
        self._layout.addWidget(l)

        # self.leak_test_timer = QTimer(self)
        # self.leak_test_timer.timeout.connect(self.test_for_leak)
        # self.leak_test_timer.start(5000)

    def set_pane_is_visible(self, to_what):
        self.tags_view.set_pane_is_visible(to_what)

    def find_text_changed(self, str):
        self.current_find_position = None

    def set_focus_to_find_box(self):
        self.item_search.setFocus()
        self.item_search.lineEdit().selectAll()

    def do_find(self, str=None):
        self.current_find_position = None
        self.find()

    def find(self):
        model = self.tags_view.model()
        model.clear_boxed()
        txt = unicode(self.item_search.currentText()).strip()

        if txt.startswith('*'):
            self.tags_view.set_new_model(filter_categories_by=txt[1:])
            self.current_find_position = None
            return
        if model.get_filter_categories_by():
            self.tags_view.set_new_model(filter_categories_by=None)
            self.current_find_position = None
            model = self.tags_view.model()

        if not txt:
            return

        self.item_search.lineEdit().blockSignals(True)
        self.search_button.setFocus(True)
        self.item_search.lineEdit().blockSignals(False)

        key = None
        colon = txt.rfind(':') if len(txt) > 2 else 0
        if colon > 0:
            key = self.parent.library_view.model().db.\
                    field_metadata.search_term_to_field_key(txt[:colon])
            txt = txt[colon+1:]

        self.current_find_position = \
            model.find_item_node(key, txt, self.current_find_position)
        if self.current_find_position:
            model.show_item_at_path(self.current_find_position, box=True)
        elif self.item_search.text():
            self.not_found_label.setVisible(True)
            if self.tags_view.verticalScrollBar().isVisible():
                sbw = self.tags_view.verticalScrollBar().width()
            else:
                sbw = 0
            width = self.width() - 8 - sbw
            height = self.not_found_label.heightForWidth(width) + 20
            self.not_found_label.resize(width, height)
            self.not_found_label.move(4, 10)
            self.not_found_label_timer.start(2000)

    def not_found_label_timer_event(self):
        self.not_found_label.setVisible(False)

# }}}
src/calibre/gui2/tag_browser/view.py (new file, 588 lines)
@ -0,0 +1,588 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import cPickle
from functools import partial
from itertools import izip

from PyQt4.Qt import (QItemDelegate, Qt, QTreeView, pyqtSignal, QSize, QIcon,
    QApplication, QMenu, QPoint, QModelIndex)

from calibre.gui2.tag_browser.model import (TagTreeItem, TAG_SEARCH_STATES,
    TagsModel)
from calibre.gui2 import config, gprefs
from calibre.utils.search_query_parser import saved_searches
from calibre.utils.icu import sort_key

class TagDelegate(QItemDelegate): # {{{

    def paint(self, painter, option, index):
        item = index.data(Qt.UserRole).toPyObject()
        if item.type != TagTreeItem.TAG:
            QItemDelegate.paint(self, painter, option, index)
            return
        r = option.rect
        model = self.parent().model()
        icon = model.data(index, Qt.DecorationRole).toPyObject()
        painter.save()
        if item.tag.state != 0 or not config['show_avg_rating'] or \
                item.tag.avg_rating is None:
            icon.paint(painter, r, Qt.AlignLeft)
        else:
            painter.setOpacity(0.3)
            icon.paint(painter, r, Qt.AlignLeft)
            painter.setOpacity(1)
            rating = item.tag.avg_rating
            painter.setClipRect(r.left(), r.bottom()-int(r.height()*(rating/5.0)),
                    r.width(), r.height())
            icon.paint(painter, r, Qt.AlignLeft)
            painter.setClipRect(r)

        # Paint the text
        if item.boxed:
            painter.drawRoundedRect(r.adjusted(1,1,-1,-1), 5, 5)
        r.setLeft(r.left()+r.height()+3)
        painter.drawText(r, Qt.AlignLeft|Qt.AlignVCenter,
                model.data(index, Qt.DisplayRole).toString())
        painter.restore()

# }}}

class TagsView(QTreeView): # {{{

    refresh_required = pyqtSignal()
    tags_marked = pyqtSignal(object)
    edit_user_category = pyqtSignal(object)
    delete_user_category = pyqtSignal(object)
    del_item_from_user_cat = pyqtSignal(object, object, object)
    add_item_to_user_cat = pyqtSignal(object, object, object)
    add_subcategory = pyqtSignal(object)
    tag_list_edit = pyqtSignal(object, object)
    saved_search_edit = pyqtSignal(object)
    rebuild_saved_searches = pyqtSignal()
    author_sort_edit = pyqtSignal(object, object)
    tag_item_renamed = pyqtSignal()
    search_item_renamed = pyqtSignal()
    drag_drop_finished = pyqtSignal(object)
    restriction_error = pyqtSignal()
    show_at_path = pyqtSignal()

    def __init__(self, parent=None):
        QTreeView.__init__(self, parent=None)
        self.tag_match = None
        self.disable_recounting = False
        self.setUniformRowHeights(True)
        self.setCursor(Qt.PointingHandCursor)
        self.setIconSize(QSize(30, 30))
        self.setTabKeyNavigation(True)
        self.setAlternatingRowColors(True)
        self.setAnimated(True)
        self.setHeaderHidden(True)
        self.setItemDelegate(TagDelegate(self))
        self.made_connections = False
        self.setAcceptDrops(True)
        self.setDragEnabled(True)
        self.setDragDropMode(self.DragDrop)
        self.setDropIndicatorShown(True)
        self.setAutoExpandDelay(500)
        self.pane_is_visible = False
        self.search_icon = QIcon(I('search.png'))
        self.user_category_icon = QIcon(I('tb_folder.png'))
        self.delete_icon = QIcon(I('list_remove.png'))
        self.rename_icon = QIcon(I('edit-undo.png'))
        self.show_at_path.connect(self.show_item_at_path,
                type=Qt.QueuedConnection)

        self._model = TagsModel(self)
        self._model.search_item_renamed.connect(self.search_item_renamed)
        self._model.refresh_required.connect(self.refresh_required,
                type=Qt.QueuedConnection)
        self._model.tag_item_renamed.connect(self.tag_item_renamed)
        self._model.restriction_error.connect(self.restriction_error)
        self._model.user_categories_edited.connect(self.user_categories_edited,
                type=Qt.QueuedConnection)
        self._model.drag_drop_finished.connect(self.drag_drop_finished)

    @property
    def hidden_categories(self):
        return self._model.hidden_categories

    @property
    def db(self):
        return self._model.db

    @property
    def collapse_model(self):
        return self._model.collapse_model

    def set_pane_is_visible(self, to_what):
        pv = self.pane_is_visible
        self.pane_is_visible = to_what
        if to_what and not pv:
            self.recount()

    def get_state(self):
        state_map = {}
        expanded_categories = []
        for row, category in enumerate(self._model.category_nodes):
            if self.isExpanded(self._model.index(row, 0, QModelIndex())):
                expanded_categories.append(category.py_name)
            states = [c.tag.state for c in category.child_tags()]
            names = [(c.tag.name, c.tag.category) for c in category.child_tags()]
            state_map[category.py_name] = dict(izip(names, states))
        return expanded_categories, state_map

    def reread_collapse_parameters(self):
        self._model.reread_collapse_parameters(self.get_state()[1])

    def set_database(self, db, tag_match, sort_by):
        self._model.set_database(db)

        self.pane_is_visible = True # because TagsModel.set_database did a recount
        self.sort_by = sort_by
        self.tag_match = tag_match
        self.setModel(self._model)
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        pop = config['sort_tags_by']
        self.sort_by.setCurrentIndex(self.db.CATEGORY_SORTS.index(pop))
        try:
            match_pop = self.db.MATCH_TYPE.index(config['match_tags_type'])
        except ValueError:
            match_pop = 0
        self.tag_match.setCurrentIndex(match_pop)
        if not self.made_connections:
            self.clicked.connect(self.toggle)
            self.customContextMenuRequested.connect(self.show_context_menu)
            self.refresh_required.connect(self.recount, type=Qt.QueuedConnection)
            self.sort_by.currentIndexChanged.connect(self.sort_changed)
            self.tag_match.currentIndexChanged.connect(self.match_changed)
            self.made_connections = True
        self.refresh_signal_processed = True
        db.add_listener(self.database_changed)
        self.expanded.connect(self.item_expanded)

    def database_changed(self, event, ids):
        if self.refresh_signal_processed:
            self.refresh_signal_processed = False
            self.refresh_required.emit()

    def user_categories_edited(self, user_cats, nkey):
        state_map = self.get_state()[1]
        self.db.prefs.set('user_categories', user_cats)
        self._model.rebuild_node_tree(state_map=state_map)
        self.show_at_path.emit('@'+nkey)

    @property
    def match_all(self):
        return self.tag_match and self.tag_match.currentIndex() > 0

    def sort_changed(self, pop):
        config.set('sort_tags_by', self.db.CATEGORY_SORTS[pop])
        self.recount()

    def match_changed(self, pop):
        try:
            config.set('match_tags_type', self.db.MATCH_TYPE[pop])
        except:
            pass

    def set_search_restriction(self, s):
        s = s if s else None
        self._model.set_search_restriction(s)

    def mouseReleaseEvent(self, event):
        # Swallow everything except leftButton so context menus work correctly
        if event.button() == Qt.LeftButton:
            QTreeView.mouseReleaseEvent(self, event)

    def mouseDoubleClickEvent(self, event):
        # swallow these to avoid toggling and editing at the same time
        pass

    @property
    def search_string(self):
        tokens = self._model.tokens()
        joiner = ' and ' if self.match_all else ' or '
        return joiner.join(tokens)

    def toggle(self, index):
        self._toggle(index, None)

    def _toggle(self, index, set_to):
        '''
        set_to: if None, advance the state. Otherwise must be one of the values
        in TAG_SEARCH_STATES
        '''
        modifiers = int(QApplication.keyboardModifiers())
        exclusive = modifiers not in (Qt.CTRL, Qt.SHIFT)
        if self._model.toggle(index, exclusive, set_to=set_to):
            self.tags_marked.emit(self.search_string)

    def conditional_clear(self, search_string):
        if search_string != self.search_string:
            self.clear()

    def context_menu_handler(self, action=None, category=None,
                             key=None, index=None, search_state=None):
        if not action:
            return
        try:
            if action == 'edit_item':
                self.edit(index)
                return
            if action == 'open_editor':
                self.tag_list_edit.emit(category, key)
                return
            if action == 'manage_categories':
                self.edit_user_category.emit(category)
                return
            if action == 'search':
                self._toggle(index, set_to=search_state)
                return
            if action == 'add_to_category':
                tag = index.tag
                if len(index.children) > 0:
                    for c in index.children:
                        self.add_item_to_user_cat.emit(category, c.tag.original_name,
                                c.tag.category)
                self.add_item_to_user_cat.emit(category, tag.original_name,
                        tag.category)
                return
            if action == 'add_subcategory':
                self.add_subcategory.emit(key)
                return
            if action == 'search_category':
                self._toggle(index, set_to=search_state)
                return
            if action == 'delete_user_category':
                self.delete_user_category.emit(key)
                return
            if action == 'delete_search':
                saved_searches().delete(key)
                self.rebuild_saved_searches.emit()
                return
            if action == 'delete_item_from_user_category':
                tag = index.tag
                if len(index.children) > 0:
                    for c in index.children:
                        self.del_item_from_user_cat.emit(key, c.tag.original_name,
                                c.tag.category)
                self.del_item_from_user_cat.emit(key, tag.original_name, tag.category)
                return
            if action == 'manage_searches':
                self.saved_search_edit.emit(category)
                return
            if action == 'edit_author_sort':
                self.author_sort_edit.emit(self, index)
                return

            reset_filter_categories = True
            if action == 'hide':
                self.hidden_categories.add(category)
            elif action == 'show':
                self.hidden_categories.discard(category)
            elif action == 'categorization':
                changed = self.collapse_model != category
                self._model.collapse_model = category
                if changed:
                    reset_filter_categories = False
                    gprefs['tags_browser_partition_method'] = category
            elif action == 'defaults':
                self.hidden_categories.clear()
            self.db.prefs.set('tag_browser_hidden_categories', list(self.hidden_categories))
            if reset_filter_categories:
                self._model.filter_categories_by = None
            self._model.rebuild_node_tree()
        except:
            return

    def show_context_menu(self, point):
        def display_name(tag):
            if tag.category == 'search':
                n = tag.name
                if len(n) > 45:
                    n = n[:45] + '...'
                return "'" + n + "'"
            return tag.name

        index = self.indexAt(point)
        self.context_menu = QMenu(self)

        if index.isValid():
            item = index.data(Qt.UserRole).toPyObject()
            tag = None

            if item.type == TagTreeItem.TAG:
                tag_item = item
                tag = item.tag
                while item.type != TagTreeItem.CATEGORY:
                    item = item.parent

            if item.type == TagTreeItem.CATEGORY:
                if not item.category_key.startswith('@'):
                    while item.parent != self._model.root_item:
                        item = item.parent
                category = unicode(item.name.toString())
                key = item.category_key
                # Verify that we are working with a field that we know something about
                if key not in self.db.field_metadata:
                    return True

                # Did the user click on a leaf node?
                if tag:
                    # If the user right-clicked on an editable item, then offer
                    # the possibility of renaming that item.
                    if tag.is_editable:
                        # Add the 'rename' items
                        self.context_menu.addAction(self.rename_icon,
                                _('Rename %s')%display_name(tag),
                                partial(self.context_menu_handler, action='edit_item',
                                    index=index))
                        if key == 'authors':
                            self.context_menu.addAction(_('Edit sort for %s')%display_name(tag),
                                    partial(self.context_menu_handler,
                                        action='edit_author_sort', index=tag.id))

                        # is_editable is also overloaded to mean 'can be added
                        # to a user category'
                        m = self.context_menu.addMenu(self.user_category_icon,
                                _('Add %s to user category')%display_name(tag))
                        nt = self.model().category_node_tree
                        def add_node_tree(tree_dict, m, path):
                            p = path[:]
                            for k in sorted(tree_dict.keys(), key=sort_key):
                                p.append(k)
                                n = k[1:] if k.startswith('@') else k
                                m.addAction(self.user_category_icon, n,
                                        partial(self.context_menu_handler,
                                            'add_to_category',
                                            category='.'.join(p), index=tag_item))
                                if len(tree_dict[k]):
                                    tm = m.addMenu(self.user_category_icon,
                                            _('Children of %s')%n)
                                    add_node_tree(tree_dict[k], tm, p)
                                p.pop()
                        add_node_tree(nt, m, [])
                    elif key == 'search':
                        self.context_menu.addAction(self.rename_icon,
                                _('Rename %s')%display_name(tag),
                                partial(self.context_menu_handler, action='edit_item',
                                    index=index))
                        self.context_menu.addAction(self.delete_icon,
                                _('Delete search %s')%display_name(tag),
                                partial(self.context_menu_handler,
                                    action='delete_search', key=tag.name))
                    if key.startswith('@') and not item.is_gst:
                        self.context_menu.addAction(self.user_category_icon,
                                _('Remove %s from category %s')%
                                    (display_name(tag), item.py_name),
                                partial(self.context_menu_handler,
                                    action='delete_item_from_user_category',
                                    key=key, index=tag_item))
                    # Add the search for value items. All leaf nodes are searchable
                    self.context_menu.addAction(self.search_icon,
                            _('Search for %s')%display_name(tag),
                            partial(self.context_menu_handler, action='search',
                                search_state=TAG_SEARCH_STATES['mark_plus'],
                                index=index))
                    self.context_menu.addAction(self.search_icon,
                            _('Search for everything but %s')%display_name(tag),
                            partial(self.context_menu_handler, action='search',
                                search_state=TAG_SEARCH_STATES['mark_minus'],
                                index=index))
                    self.context_menu.addSeparator()
                elif key.startswith('@') and not item.is_gst:
                    if item.can_be_edited:
                        self.context_menu.addAction(self.rename_icon,
                                _('Rename %s')%item.py_name,
                                partial(self.context_menu_handler, action='edit_item',
                                    index=index))
                    self.context_menu.addAction(self.user_category_icon,
                            _('Add sub-category to %s')%item.py_name,
                            partial(self.context_menu_handler,
                                action='add_subcategory', key=key))
                    self.context_menu.addAction(self.delete_icon,
                            _('Delete user category %s')%item.py_name,
                            partial(self.context_menu_handler,
                                action='delete_user_category', key=key))
                    self.context_menu.addSeparator()
                # Hide/Show/Restore categories
                self.context_menu.addAction(_('Hide category %s') % category,
                        partial(self.context_menu_handler, action='hide',
                            category=key))
                if self.hidden_categories:
                    m = self.context_menu.addMenu(_('Show category'))
                    for col in sorted(self.hidden_categories,
                            key=lambda x: sort_key(self.db.field_metadata[x]['name'])):
                        m.addAction(self.db.field_metadata[col]['name'],
                                partial(self.context_menu_handler, action='show', category=col))

                # search by category. Some categories are not searchable, such
                # as search and news
                if item.tag.is_searchable:
                    self.context_menu.addAction(self.search_icon,
                            _('Search for books in category %s')%category,
                            partial(self.context_menu_handler,
                                action='search_category',
                                index=self._model.createIndex(item.row(), 0, item),
                                search_state=TAG_SEARCH_STATES['mark_plus']))
                    self.context_menu.addAction(self.search_icon,
                            _('Search for books not in category %s')%category,
                            partial(self.context_menu_handler,
                                action='search_category',
                                index=self._model.createIndex(item.row(), 0, item),
                                search_state=TAG_SEARCH_STATES['mark_minus']))
                # Offer specific editors for tags/series/publishers/saved searches
                self.context_menu.addSeparator()
                if key in ['tags', 'publisher', 'series'] or \
                        self.db.field_metadata[key]['is_custom']:
                    self.context_menu.addAction(_('Manage %s')%category,
                            partial(self.context_menu_handler, action='open_editor',
                                category=tag.original_name if tag else None,
                                key=key))
                elif key == 'authors':
                    self.context_menu.addAction(_('Manage %s')%category,
                            partial(self.context_menu_handler, action='edit_author_sort'))
                elif key == 'search':
                    self.context_menu.addAction(_('Manage Saved Searches'),
                            partial(self.context_menu_handler, action='manage_searches',
                                category=tag.name if tag else None))

                # Always show the user categories editor
                self.context_menu.addSeparator()
                if key.startswith('@') and \
                        key[1:] in self.db.prefs.get('user_categories', {}).keys():
                    self.context_menu.addAction(_('Manage User Categories'),
                            partial(self.context_menu_handler, action='manage_categories',
                                category=key[1:]))
                else:
                    self.context_menu.addAction(_('Manage User Categories'),
                            partial(self.context_menu_handler, action='manage_categories',
                                category=None))

        if self.hidden_categories:
            if not self.context_menu.isEmpty():
                self.context_menu.addSeparator()
            self.context_menu.addAction(_('Show all categories'),
                    partial(self.context_menu_handler, action='defaults'))

        m = self.context_menu.addMenu(_('Change sub-categorization scheme'))
        da = m.addAction('Disable',
                partial(self.context_menu_handler, action='categorization', category='disable'))
        fla = m.addAction('By first letter',
                partial(self.context_menu_handler, action='categorization', category='first letter'))
        pa = m.addAction('Partition',
                partial(self.context_menu_handler, action='categorization', category='partition'))
        if self.collapse_model == 'disable':
            da.setCheckable(True)
            da.setChecked(True)
        elif self.collapse_model == 'first letter':
            fla.setCheckable(True)
            fla.setChecked(True)
        else:
            pa.setCheckable(True)
            pa.setChecked(True)

        if not self.context_menu.isEmpty():
            self.context_menu.popup(self.mapToGlobal(point))
        return True

    def dragMoveEvent(self, event):
        QTreeView.dragMoveEvent(self, event)
        self.setDropIndicatorShown(False)
        index = self.indexAt(event.pos())
        if not index.isValid():
            return
        src_is_tb = event.mimeData().hasFormat('application/calibre+from_tag_browser')
        item = index.data(Qt.UserRole).toPyObject()
        flags = self._model.flags(index)
        if item.type == TagTreeItem.TAG and flags & Qt.ItemIsDropEnabled:
            self.setDropIndicatorShown(not src_is_tb)
            return
        if item.type == TagTreeItem.CATEGORY and not item.is_gst:
            fm_dest = self.db.metadata_for_field(item.category_key)
            if fm_dest['kind'] == 'user':
                if src_is_tb:
                    if event.dropAction() == Qt.MoveAction:
                        data = str(event.mimeData().data('application/calibre+from_tag_browser'))
                        src = cPickle.loads(data)
                        for s in src:
                            if s[0] == TagTreeItem.TAG and \
                                    (not s[1].startswith('@') or s[2]):
                                return
                    self.setDropIndicatorShown(True)
                    return
                md = event.mimeData()
                if hasattr(md, 'column_name'):
                    fm_src = self.db.metadata_for_field(md.column_name)
                    if md.column_name in ['authors', 'publisher', 'series'] or \
                            (fm_src['is_custom'] and (
                                (fm_src['datatype'] in ['series', 'text', 'enumeration'] and
                                    not fm_src['is_multiple']) or
                                (fm_src['datatype'] == 'composite' and
                                    fm_src['display'].get('make_category', False)))):
                        self.setDropIndicatorShown(True)

    def clear(self):
        if self.model():
            self.model().clear_state()

    def is_visible(self, idx):
        item = idx.data(Qt.UserRole).toPyObject()
        if getattr(item, 'type', None) == TagTreeItem.TAG:
            idx = idx.parent()
        return self.isExpanded(idx)

    def recount(self, *args):
        '''
        Rebuild the category tree, expand any categories that were expanded,
        reset the search states, and reselect the current node.
        '''
        if self.disable_recounting or not self.pane_is_visible:
            return
        self.refresh_signal_processed = True
        ci = self.currentIndex()
        if not ci.isValid():
            ci = self.indexAt(QPoint(10, 10))
        path = self.model().path_for_index(ci) if self.is_visible(ci) else None
        expanded_categories, state_map = self.get_state()
        self._model.rebuild_node_tree(state_map=state_map)
        for category in expanded_categories:
            self.expand(self._model.index_for_category(category))
        self.show_item_at_path(path)

    def show_item_at_path(self, path, box=False,
                          position=QTreeView.PositionAtCenter):
        '''
        Scroll the browser and open categories to show the item referenced by
        path. If possible, the item is placed in the center. If box=True, a
        box is drawn around the item.
        '''
        if path:
            self.show_item_at_index(self._model.index_for_path(path), box=box,
                    position=position)

    def show_item_at_index(self, idx, box=False,
                           position=QTreeView.PositionAtCenter):
        if idx.isValid():
            self.setCurrentIndex(idx)
            self.scrollTo(idx, position)
            self.setCurrentIndex(idx)
            if box:
                self._model.set_boxed(idx)

    def item_expanded(self, idx):
        '''
        Called by the expanded signal
        '''
        self.setCurrentIndex(idx)

# }}}
File diff suppressed because it is too large
@ -39,7 +39,7 @@ from calibre.gui2.jobs import JobManager, JobsDialog, JobsButton
from calibre.gui2.init import LibraryViewMixin, LayoutMixin
from calibre.gui2.search_box import SearchBoxMixin, SavedSearchBoxMixin
from calibre.gui2.search_restriction_mixin import SearchRestrictionMixin
from calibre.gui2.tag_view import TagBrowserMixin
from calibre.gui2.tag_browser.ui import TagBrowserMixin


class Listener(Thread): # {{{
@ -72,9 +72,7 @@ class UpdateNotification(QDialog):
        self.label = QLabel(('<p>'+
            _('%s has been updated to version <b>%s</b>. '
                'See the <a href="http://calibre-ebook.com/whats-new'
                '">new features</a>.') + '<p>'+_('Update <b>only</b> if one of the '
                'new features or bug fixes is important to you. '
                'If the current version works well for you, do not update.'))%(
                '">new features</a>.'))%(
                    __appname__, calibre_version))
        self.label.setOpenExternalLinks(True)
        self.label.setWordWrap(True)
@ -2016,7 +2016,7 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
                val = mi.get(key, None)
                if force_changes or val is not None:
                    doit(self.set_custom, id, val=val, extra=mi.get_extra(key),
                         label=user_mi[key]['label'], commit=False)
                         label=user_mi[key]['label'], commit=False, notify=False)
        if commit:
            self.conn.commit()
        if notify:
File diff suppressed because it is too large
@ -347,5 +347,6 @@ class EvalFormatter(TemplateFormatter):
        key = key.lower()
        return kwargs.get(key, _('No such variable ') + key)

# DEPRECATED. This is not thread safe. Do not use.
eval_formatter = EvalFormatter()
@ -202,9 +202,9 @@ class BuiltinEval(BuiltinFormatterFunction):
            'results from local variables.')

    def evaluate(self, formatter, kwargs, mi, locals, template):
        from formatter import eval_formatter
        from formatter import EvalFormatter
        template = template.replace('[[', '{').replace(']]', '}')
        return eval_formatter.safe_format(template, locals, 'EVAL', None)
        return EvalFormatter().safe_format(template, locals, 'EVAL', None)

class BuiltinAssign(BuiltinFormatterFunction):
    name = 'assign'
@ -785,7 +785,7 @@ class BuiltinDaysBetween(BuiltinFormatterFunction):
        except:
            return ''
        i = d1 - d2
        return str(i.days)
        return str('%d.%d'%(i.days, i.seconds/8640))
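
A quick worked example of the new days_between return value: 8640 seconds is one tenth of a day, so the second field is a single fractional digit.

    # timedelta of 1 day 12 hours: i.days == 1, i.seconds == 43200
    # 43200 / 8640 == 5, so the function now returns '1.5' instead of '1'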

builtin_add = BuiltinAdd()
@ -7,15 +7,82 @@ __license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
import os, cPickle, struct
from threading import Thread
from Queue import Queue, Empty
from multiprocessing.connection import arbitrary_address, Listener
from functools import partial

from calibre.constants import iswindows
from calibre import as_unicode, prints
from calibre.constants import iswindows, DEBUG

def _encode(msg):
    raw = cPickle.dumps(msg, -1)
    size = len(raw)
    header = struct.pack('!Q', size)
    return header + raw

def _decode(raw):
    sz = struct.calcsize('!Q')
    if len(raw) < sz:
        return 'invalid', None
    header, = struct.unpack('!Q', raw[:sz])
    if len(raw) != sz + header or header == 0:
        return 'invalid', None
    return cPickle.loads(raw[sz:])
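
A round-trip sketch of the length-prefixed pickle framing defined above (the tuple payload is illustrative; any picklable object works):

    raw = _encode(('ok', {'id': 42}))
    print _decode(raw)        # ('ok', {'id': 42})
    print _decode(raw[:4])    # truncated header -> ('invalid', None)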
class Writer(Thread):

    TIMEOUT = 60  # seconds

    def __init__(self, conn):
        Thread.__init__(self)
        self.daemon = True
        self.dataq, self.resultq = Queue(), Queue()
        self.conn = conn
        self.start()
        self.data_written = False

    def close(self):
        self.dataq.put(None)

    def flush(self):
        pass

    def write(self, raw_data):
        self.dataq.put(raw_data)

        try:
            ex = self.resultq.get(True, self.TIMEOUT)
        except Empty:
            raise IOError('Writing to socket timed out')
        else:
            if ex is not None:
                raise IOError('Writing to socket failed with error: %s' % ex)

    def run(self):
        while True:
            x = self.dataq.get()
            if x is None:
                break
            try:
                self.data_written = True
                self.conn.send_bytes(x)
            except Exception as e:
                self.resultq.put(as_unicode(e))
            else:
                self.resultq.put(None)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

class Server(Thread):

    def __init__(self):
    def __init__(self, dispatcher):
        Thread.__init__(self)
        self.daemon = True

@ -27,6 +94,13 @@ class Server(Thread):
                authkey=self.auth_key, backlog=4)

        self.keep_going = True
        self.dispatcher = dispatcher

    @property
    def connection_information(self):
        if not self.is_alive():
            self.start()
        return (self.address, self.auth_key)

    def stop(self):
        self.keep_going = False
@ -43,4 +117,49 @@ class Server(Thread):
        except:
            pass

    def handle_client(self, conn):
        t = Thread(target=partial(self._handle_client, conn))
        t.daemon = True
        t.start()

    def _handle_client(self, conn):
        while True:
            try:
                func_name, args, kwargs = conn.recv()
            except EOFError:
                try:
                    conn.close()
                except:
                    pass
                return
            else:
                try:
                    self.call_func(func_name, args, kwargs, conn)
                except:
                    try:
                        conn.close()
                    except:
                        pass
                    prints('Proxy function: %s with args: %r and'
                           ' kwargs: %r failed' % (func_name, args, kwargs))
                    if DEBUG:
                        import traceback
                        traceback.print_exc()
                    break

    def call_func(self, func_name, args, kwargs, conn):
        with Writer(conn) as f:
            try:
                self.dispatcher(f, func_name, args, kwargs)
            except Exception as e:
                if not f.data_written:
                    import traceback
                    # Try to tell the client process what error happened
                    try:
                        conn.send_bytes(_encode(('failed', (unicode(e),
                            as_unicode(traceback.format_exc())))))
                    except:
                        pass
                raise
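
A minimal sketch of the dispatcher contract that call_func assumes (the name echo_dispatcher and the reply shape are illustrative, not part of the commit): the dispatcher receives the Writer as a file-like object plus the decoded call, and must frame whatever it writes back.

    def echo_dispatcher(writer, func_name, args, kwargs):
        # Echo the call back to the client as a framed, pickled reply
        writer.write(_encode(('ok', (func_name, args, kwargs))))

    server = Server(echo_dispatcher)
    address, auth_key = server.connection_information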
src/calibre/utils/opensearch/__init__.py (new file, 6 lines)
@ -0,0 +1,6 @@
from description import Description
from query import Query
from client import Client
from results import Results

# Reference the names so the imports are not flagged as unused
Description, Query, Client, Results
src/calibre/utils/opensearch/client.py (new file, 39 lines)
@ -0,0 +1,39 @@
from description import Description
from query import Query
from results import Results

class Client:

    """This is the class you'll probably want to be using. You simply
    pass the constructor the url for the service description file and
    issue a search and get back results as an iterable Results object.

    The neat thing about a Results object is that it will seamlessly
    handle fetching more results from the opensearch server when it can...
    so you just need to iterate and can let the paging be taken care of
    for you.

        from opensearch import Client
        client = Client(description_url)
        results = client.search("computer")
        for result in results:
            print result.title
    """

    def __init__(self, url, agent="python-opensearch <https://github.com/edsu/opensearch>"):
        self.agent = agent
        self.description = Description(url, self.agent)

    def search(self, search_terms, page_size=25):
        """Perform a search and get back a results object
        """
        url = self.description.get_best_template()
        query = Query(url)

        # set up initial values
        query.searchTerms = search_terms
        query.count = page_size

        # run the results
        return Results(query, agent=self.agent)
src/calibre/utils/opensearch/description.py (new file, 127 lines)
@ -0,0 +1,127 @@
from urllib2 import urlopen, Request
from xml.dom.minidom import parse
from url import URL

class Description:
    """A class for representing OpenSearch Description files.
    """

    def __init__(self, url="", agent=""):
        """The constructor which may pass an optional url to load from.

            d = Description("http://www.example.com/description")
        """
        self.agent = agent
        if url:
            self.load(url)

    def load(self, url):
        """For loading up a description object from a url. Normally
        you'll probably just want to pass a URL into the constructor.
        """
        req = Request(url, headers={'User-Agent':self.agent})
        self.dom = parse(urlopen(req))

        # version 1.1 has repeating Url elements
        self.urls = self._get_urls()

        # this is version 1.0 specific
        self.url = self._get_element_text('Url')
        self.format = self._get_element_text('Format')

        self.shortname = self._get_element_text('ShortName')
        self.longname = self._get_element_text('LongName')
        self.description = self._get_element_text('Description')
        self.image = self._get_element_text('Image')
        self.samplesearch = self._get_element_text('SampleSearch')
        self.developer = self._get_element_text('Developer')
        self.contact = self._get_element_text('Contact')
        self.attribution = self._get_element_text('Attribution')
        self.syndicationright = self._get_element_text('SyndicationRight')

        tag_text = self._get_element_text('Tags')
        if tag_text is not None:
            self.tags = tag_text.split(" ")

        if self._get_element_text('AdultContent') == 'true':
            self.adultcontent = True
        else:
            self.adultcontent = False

    def get_url_by_type(self, type):
        """Walks available urls and returns them by type. Only
        appropriate in opensearch v1.1 where there can be multiple
        query targets. Returns none if no such type is found.

            url = description.get_url_by_type('application/rss+xml')
        """
        for url in self.urls:
            if url.type == type:
                return url
        return None

    def get_best_template(self):
        """OK, best is a value judgement, but so be it. You'll get
        back either the atom, rss or first template available. This
        method handles the main difference between opensearch v1.0 and v1.1
        """
        # version 1.0
        if self.url:
            return self.url

        # atom
        if self.get_url_by_type('application/atom+xml'):
            return self.get_url_by_type('application/atom+xml').template

        # rss
        if self.get_url_by_type('application/rss+xml'):
            return self.get_url_by_type('application/rss+xml').template

        # other possible rss type
        if self.get_url_by_type('text/xml'):
            return self.get_url_by_type('text/xml').template

        # otherwise just the first one
        if len(self.urls) > 0:
            return self.urls[0].template

        # out of luck
        return None

    # these are internal methods for querying xml

    def _get_element_text(self, tag):
        elements = self._get_elements(tag)
        if not elements:
            return None
        return self._get_text(elements[0].childNodes)

    def _get_attribute_text(self, tag, attribute):
        elements = self._get_elements(tag)
        if not elements:
            return ''
        return elements[0].getAttribute(attribute)

    def _get_elements(self, tag):
        return self.dom.getElementsByTagName(tag)

    def _get_text(self, nodes):
        text = ''
        for node in nodes:
            if node.nodeType == node.TEXT_NODE:
                text += node.data
        return text.strip()

    def _get_urls(self):
        urls = []
        for element in self._get_elements('Url'):
            template = element.getAttribute('template')
            type = element.getAttribute('type')
            if template and type:
                url = URL()
                url.template = template
                url.type = type
                urls.append(url)
        return urls
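
A minimal sketch of Description in use (assumes network access; the URL is the one the PragmaticBookshelfStore above points at):

    d = Description('http://pragprog.com/catalog/search-description')
    print d.shortname
    print d.get_best_template()   # prefers atom, then rss, then the first Url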