Sync to trunk.

John Schember 2011-10-29 13:34:15 -04:00
commit 86679c65c0
126 changed files with 46235 additions and 38337 deletions

View File

@ -19,6 +19,114 @@
# new recipes:
# - title:
- version: 0.8.24
date: 2011-10-27
new features:
- title: "Kobo: Add support for fetching annotations from the kobo reader."
description: "Right click the send to device button in calibre with your kobo connected and choose fetch annotations. The annotations are placed into the comments of the corresponding books in the calibre library. This feature is still experimental."
type: major
- title: "Preserve the set of selected books in the library view when a device is connected, fixing a long standing annoyance"
bug fixes:
- title: "Prevent changing of device metadata management option while a device is connected."
tickets: [874118]
- title: "Book details panel: Show tooltip only when hovering over cover, not the rest of the book information, as it makes it hard to read."
tickets: [876454]
- title: "MOBI Output: Fix use of list elements as link anchors caused links to always point to start of list."
tickets: [879391]
- title: "RB Output: Fix calibre generated rb files not being opened by the RocketBook."
tickets: [880930]
- title: "FB2 Input: Dont choke on FB2 files that have empty embedded content tags."
tickets: [880904]
- title: "ODT Input: CSS rationalization should not fail with non ascii class names"
- title: "Fix creating new library using the copy structure option incorrectly setting all text type columns to be like the tags column"
- title: "E-book viewer: Don't choke on windows installs with a non UTF-8 filesystem encoding."
tickets: [879740]
improved recipes:
- Novaya Gazeta
- El Universal (Venezuela)
- The Australian (subscription enabled)
- Metro NL
- The Scotsman
- Japan Times
new recipes:
- title: Silicon Republic
author: Neil Grogan
- title: Calibre Blog
author: Krittika Goyal
- version: 0.8.23
date: 2011-10-21
new features:
- title: "Drivers for T-Mobile Move, new Pandigital Novel, New Onyx Boox and Freescale MX 515"
- title: "SONY T1 driver: Support for periodicals and better timezone detection"
- title: "Add a remove cover entry to the right click menu of the cover display in the right panel"
tickets: [874689]
bug fixes:
- title: "Amazon metadata download: Fix for change in Amazon website that broke downloading metadata."
tickets: [878395]
- title: "MOBI metadata: When reading titles from MOBI files only use the title in the PDB header if there is no long title in the EXTH header"
tickets: [ 875243 ]
- title: "Fix regression that broke use of complex custom columns in save to disk templates."
tickets: [877366]
- title: "Fix regression that broke reading metadata from CHM files"
- title: "Fix a bug that broke conversion of some zipped up HTML files with non ascii filenames on certain windows installs."
tickets: [873288]
- title: "RTF Input: Fix bug in handling of paragraph separators."
tickets: [863735]
- title: "Fix a regression that broke downloading certain periodicals for the Kindle."
tickets: [875595]
- title: "Fix regression that broke updating of covers inside ebook files when saving to disk"
- title: "Fix regression breaking editing the 'show in tag browser' checkbox in custom column setup editing"
- title: "Fix typo that broke stopping selected jobs in 0.8.22"
improved recipes:
- Columbus Dispatch
- Ming Pao
- La Republica
- Korea Times
- USA Today
- CNN
- Liberation
- El Pais
- Helsingin Sanomat
new recipes:
- title: Kyugyhang, Hankyoreh and Hankyoreh21
author: Seongkyoun Yoo
- title: English Katherimini
author: Thomas Scholl
- title: Various French news sources
author: Aurelien Chabot
- version: 0.8.22
date: 2011-10-14

View File

@ -4,7 +4,6 @@ __copyright__ = '2011 Aurélien Chabot <contact@aurelienchabot.fr>'
'''
20minutes.fr
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
class Minutes(BasicNewsRecipe):

View File

@ -0,0 +1,18 @@
from calibre.web.feeds.news import BasicNewsRecipe
class CalibreBlog(BasicNewsRecipe):
title = u'Calibre Blog'
language = 'en'
__author__ = 'Krittika Goyal'
oldest_article = 1000 #days
max_articles_per_feed = 5
use_embedded_content = False
no_stylesheets = True
auto_cleanup = True
feeds = [
('Article',
'http://blog.calibre-ebook.com/feeds/posts/default'),
]
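For anyone who wants to try a recipe like this outside the GUI, calibre can run it directly with ebook-convert (a sketch; the recipe filename here is hypothetical):

ebook-convert calibre_blog.recipe calibre_blog.epub --test

The --test switch fetches only a couple of articles per feed, which keeps trial runs short.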

View File

@ -14,67 +14,43 @@ class ColumbusDispatchRecipe(BasicNewsRecipe):
use_embedded_content = False
remove_empty_feeds = True
oldest_article = 1.2
max_articles_per_feed = 100
use_embedded_content = False
no_stylesheets = True
remove_javascript = True
encoding = 'utf-8'
# Seems to work best, but YMMV
simultaneous_downloads = 2
auto_cleanup = True
#auto_cleanup_keep = '//div[@id="story-photos"]'
# Feeds from http://www.dispatch.com/live/content/rss/index.html
feeds = []
feeds.append((u'News: Local and state news', u'http://www.dispatch.com/live/static/crt/2_rss_localnews.xml'))
feeds.append((u'News: National news', u'http://www.dispatch.com/live/static/crt/2_rss_nationalnews.xml'))
feeds.append((u'News: Editorials', u'http://www.dispatch.com/live/static/crt/2_rss_editorials.xml'))
feeds.append((u'News: Columnists', u'http://www.dispatch.com/live/static/crt/2_rss_columnists.xml'))
feeds.append((u'News: Health news', u'http://www.dispatch.com/live/static/crt/2_rss_health.xml'))
feeds.append((u'News: Science news', u'http://www.dispatch.com/live/static/crt/2_rss_science.xml'))
feeds.append((u'Sports: OSU football', u'http://www.dispatch.com/live/static/crt/2_rss_osufootball.xml'))
feeds.append((u'Sports: OSU men\'s basketball', u'http://www.dispatch.com/live/static/crt/2_rss_osumensbball.xml'))
feeds.append((u'Sports: OSU women\'s basketball', u'http://www.dispatch.com/live/static/crt/2_rss_osuwomensbball.xml'))
feeds.append((u'Sports: OSU sports', u'http://www.dispatch.com/live/static/crt/2_rss_osusports.xml'))
feeds.append((u'Sports: Blue Jackets', u'http://www.dispatch.com/live/static/crt/2_rss_bluejackets.xml'))
feeds.append((u'Sports: Crew', u'http://www.dispatch.com/live/static/crt/2_rss_crew.xml'))
feeds.append((u'Sports: Clippers', u'http://www.dispatch.com/live/static/crt/2_rss_clippers.xml'))
feeds.append((u'Sports: Indians', u'http://www.dispatch.com/live/static/crt/2_rss_indians.xml'))
feeds.append((u'Sports: Reds', u'http://www.dispatch.com/live/static/crt/2_rss_reds.xml'))
feeds.append((u'Sports: Golf', u'http://www.dispatch.com/live/static/crt/2_rss_golf.xml'))
feeds.append((u'Sports: Outdoors', u'http://www.dispatch.com/live/static/crt/2_rss_outdoors.xml'))
feeds.append((u'Sports: Cavs/NBA', u'http://www.dispatch.com/live/static/crt/2_rss_cavaliers.xml'))
feeds.append((u'Sports: High Schools', u'http://www.dispatch.com/live/static/crt/2_rss_highschools.xml'))
feeds.append((u'Sports: Browns', u'http://www.dispatch.com/live/static/crt/2_rss_browns.xml'))
feeds.append((u'Sports: Bengals', u'http://www.dispatch.com/live/static/crt/2_rss_bengals.xml'))
feeds.append((u'Sports: Auto Racing', u'http://www.dispatch.com/live/static/crt/2_rss_autoracing.xml'))
feeds.append((u'Business News', u'http://www.dispatch.com/live/static/crt/2_rss_business.xml'))
feeds.append((u'Features: Weekender', u'http://www.dispatch.com/live/static/crt/2_rss_weekender.xml'))
feeds.append((u'Features: Life and Arts', u'http://www.dispatch.com/live/static/crt/2_rss_lifearts.xml'))
feeds.append((u'Features: Food', u'http://www.dispatch.com/live/static/crt/2_rss_food.xml'))
feeds.append((u'Features: NOW! for kids', u'http://www.dispatch.com/live/static/crt/2_rss_now.xml'))
feeds.append((u'Features: Travel', u'http://www.dispatch.com/live/static/crt/2_rss_travel.xml'))
feeds.append((u'Features: Home and Garden', u'http://www.dispatch.com/live/static/crt/2_rss_homegarden.xml'))
feeds.append((u'Features: Faith and Values', u'http://www.dispatch.com/live/static/crt/2_rss_faithvalues.xml'))
#feeds.append((u'', u''))
feeds = [
('Local',
'http://www.dispatch.com/content/syndication/news_local-state.xml'),
('National',
'http://www.dispatch.com/content/syndication/news_national.xml'),
('Business',
'http://www.dispatch.com/content/syndication/news_business.xml'),
('Editorials',
'http://www.dispatch.com/content/syndication/opinion_editorials.xml'),
('Columnists',
'http://www.dispatch.com/content/syndication/opinion_columns.xml'),
('Life and Arts',
'http://www.dispatch.com/content/syndication/lae_life-and-arts.xml'),
('OSU Sports',
'http://www.dispatch.com/content/syndication/sports_osu.xml'),
('Auto Racing',
'http://www.dispatch.com/content/syndication/sports_auto-racing.xml'),
('Outdoors',
'http://www.dispatch.com/content/syndication/sports_outdoors.xml'),
('Bengals',
'http://www.dispatch.com/content/syndication/sports_bengals.xml'),
('Indians',
'http://www.dispatch.com/content/syndication/sports_indians.xml'),
('Clippers',
'http://www.dispatch.com/content/syndication/sports_clippers.xml'),
('Crew',
'http://www.dispatch.com/content/syndication/sports_crew.xml'),
('Reds',
'http://www.dispatch.com/content/syndication/sports_reds.xml'),
('Blue Jackets',
'http://www.dispatch.com/content/syndication/sports_bluejackets.xml'),
]
keep_only_tags = []
keep_only_tags.append(dict(name = 'div', attrs = {'class': 'colhed'}))
keep_only_tags.append(dict(name = 'div', attrs = {'class': 'hed'}))
keep_only_tags.append(dict(name = 'div', attrs = {'class': 'subhed'}))
keep_only_tags.append(dict(name = 'div', attrs = {'class': 'date'}))
keep_only_tags.append(dict(name = 'div', attrs = {'class': 'byline'}))
keep_only_tags.append(dict(name = 'div', attrs = {'class': 'srcline'}))
keep_only_tags.append(dict(name = 'div', attrs = {'class': 'body'}))
remove_tags = []
remove_tags.append(dict(name = 'div', attrs = {'id': 'middle-story-ad-container'}))
extra_css = '''
body {font-family:verdana,arial,helvetica,geneva,sans-serif ;}
a {text-decoration: none; color: blue;}
div.colhed {font-weight: bold;}
div.hed {font-size: xx-large; font-weight: bold; margin-bottom: 0.2em;}
div.subhed {font-size: large;}
div.date {font-size: x-small; font-style: italic; color: #666666; margin-top: 0.4em; margin-bottom: 0.4em;}
div.byline, div.srcline {font-size: small; color: #696969;}
'''

View File

@ -56,6 +56,7 @@ class ElUniversal(BasicNewsRecipe):
]
def print_version(self, url):
rp,sep,rest = url.rpartition('/')
return rp + sep + 'imp_' + rest
return url + '-imp'
def get_article_url(self, article):
return article.get('guid', None)
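For reference, a minimal sketch of what the new print_version rewrite produces (the article URL below is hypothetical):

url = 'http://www.eluniversal.com/2011/10/28/some-story.shtml'
rp, sep, rest = url.rpartition('/')
print rp + sep + 'imp_' + rest
# -> http://www.eluniversal.com/2011/10/28/imp_some-story.shtml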

View File

@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
class BasicUserRecipe1318572550(AutomaticNewsRecipe):
from calibre.web.feeds.news import BasicNewsRecipe
class BasicUserRecipe1318572550(BasicNewsRecipe):
title = u'FrAndroid'
oldest_article = 2
max_articles_per_feed = 100

View File

@ -1,5 +1,8 @@
# -*- coding: utf-8 -*-
class BasicUserRecipe1318572445(AutomaticNewsRecipe):
from calibre.web.feeds.news import BasicNewsRecipe
class BasicUserRecipe1318572445(BasicNewsRecipe):
title = u'Google Mobile Blog'
oldest_article = 7
max_articles_per_feed = 100

View File

@ -3,34 +3,31 @@ __copyright__ = '2011, Seongkyoun Yoo <seongkyoun.yoo at gmail.com>'
'''
Profile to download The Hankyoreh
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
class Hankyoreh(BasicNewsRecipe):
title = u'Hankyoreh'
language = 'ko'
description = u'The Hankyoreh News articles'
__author__ = 'Seongkyoun Yoo'
__author__ = 'Seongkyoun Yoo'
oldest_article = 5
recursions = 1
max_articles_per_feed = 5
no_stylesheets = True
keep_only_tags = [
dict(name='tr', attrs={'height':['60px']}),
dict(id=['fontSzArea'])
dict(name='tr', attrs={'height':['60px']}),
dict(id=['fontSzArea'])
]
remove_tags = [
dict(target='_blank'),
dict(name='td', attrs={'style':['padding: 10px 8px 5px 8px;']}),
dict(name='iframe', attrs={'width':['590']}),
dict(name='td', attrs={'style':['padding: 10px 8px 5px 8px;']}),
dict(name='iframe', attrs={'width':['590']}),
]
remove_tags_after = [
dict(target='_top')
]
feeds = [
('All News','http://www.hani.co.kr/rss/'),
('All News','http://www.hani.co.kr/rss/'),
('Politics','http://www.hani.co.kr/rss/politics/'),
('Economy','http://www.hani.co.kr/rss/economy/'),
('Society','http://www.hani.co.kr/rss/society/'),
@ -47,4 +44,4 @@ class Hankyoreh(BasicNewsRecipe):
('Multihani','http://www.hani.co.kr/rss/multihani/'),
('Lead','http://www.hani.co.kr/rss/lead/'),
('Newsrank','http://www.hani.co.kr/rss/newsrank/'),
]
]

View File

@ -3,7 +3,6 @@ __copyright__ = '2011, Seongkyoun Yoo <seongkyoun.yoo at gmail.com>'
'''
Profile to download The Hankyoreh
'''
import re
from calibre.web.feeds.news import BasicNewsRecipe
class Hankyoreh21(BasicNewsRecipe):
@ -23,4 +22,4 @@ class Hankyoreh21(BasicNewsRecipe):
feeds = [
('Hani21','http://h21.hani.co.kr/rss/ '),
]
]

Binary file not shown (new image, 712 B).

View File

@ -44,7 +44,11 @@ class JapanTimes(BasicNewsRecipe):
return rurl.partition('?')[0]
def print_version(self, url):
return url.replace('/cgi-bin/','/print/')
if '/rss/' in url:
return url.replace('.jp/rss/','.jp/print/')
if '/text/' in url:
return url.replace('.jp/text/','.jp/print/')
return url
def preprocess_html(self, soup):
for item in soup.findAll(style=True):

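In a sketch (the feed URL below is hypothetical), the new print_version maps both URL styles onto the print pages:

url = 'http://www.japantimes.co.jp/rss/nn20111029a1.html'
print url.replace('.jp/rss/', '.jp/print/')
# -> http://www.japantimes.co.jp/print/nn20111029a1.html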
View File

@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
class BasicUserRecipe1318619728(AutomaticNewsRecipe):
from calibre.web.feeds.news import BasicNewsRecipe
class BasicUserRecipe1318619728(BasicNewsRecipe):
title = u'Korben'
oldest_article = 7
max_articles_per_feed = 100

View File

@ -4,7 +4,6 @@ __copyright__ = '2011 Aurélien Chabot <contact@aurelienchabot.fr>'
'''
LePoint.fr
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
class lepoint(BasicNewsRecipe):

View File

@ -4,7 +4,6 @@ __copyright__ = '2011 Aurélien Chabot <contact@aurelienchabot.fr>'
'''
Lexpress.fr
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
class lepoint(BasicNewsRecipe):

View File

@ -1,9 +1,21 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
from calibre.utils.magick import Image
''' Version 1.2: updated cover image to match the changed website,
added info date to the title.
Version 1.4: updated tags and delay, added auto cleanup (22-09-2011).
Version 1.5: changes due to changes in the site.
Version 1.6: added css, removed auto cleanup, added Buitenland section,
added use_embedded_content, added remove_attributes.
Added some processing of pictures.
Removed links in html.
Removed extra white characters.
Changed handling of self-closing spans.
'''
class AdvancedUserRecipe1306097511(BasicNewsRecipe):
title = u'Metro Nieuws NL'
# Version 1.2, updated cover image to match the changed website.
# added info date on title
oldest_article = 2
max_articles_per_feed = 100
__author__ = u'DrMerry'
@ -11,8 +23,8 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
language = u'nl'
simultaneous_downloads = 5
#delay = 1
auto_cleanup = True
auto_cleanup_keep = '//div[@class="article-image-caption-2column"]|//div[@id="date"]'
#auto_cleanup = True
#auto_cleanup_keep = '//div[@class="article-image-caption-2column"]/*|//div[@id="date"]/*|//div[@class="article-image-caption-3column"]/*'
timefmt = ' [%A, %d %b %Y]'
no_stylesheets = True
remove_javascript = True
@ -20,22 +32,73 @@ class AdvancedUserRecipe1306097511(BasicNewsRecipe):
cover_url = 'http://www.oldreadmetro.com/img/en/metroholland/last/1/small.jpg'
publication_type = 'newspaper'
remove_tags_before = dict(name='div', attrs={'id':'date'})
remove_tags_after = dict(name='div', attrs={'id':'column-1-3'})
remove_tags_after = dict(name='div', attrs={'class':'article-body'})
encoding = 'utf-8'
extra_css = 'body{font-size:12px} #date, .article-image-caption {font-size: 0.583em} h2 {font-size: 0.917em} p.small, span, li, li span span, p, b, i, u, p.small.article-paragraph, p.small.article-paragraph p, p.small.article-paragraph span, p span, span {font-size: 0.833em} h1 {font-size: 1em}'
remove_attributes = ['style', 'font', 'width', 'height']
use_embedded_content = False
extra_css = 'body {padding:5px 0px; background:#fff;font-size: 13px;}\
#date {clear: both;margin-left: 19px;font-size: 11px;font-weight: 300;color: #616262;height: 15px;}\
.article-box-fact.module-title {clear:both;border-top:1px solid black;border-bottom:4px solid black;padding: 8px 0;color: #24763b;font-family: arial, sans-serif;font-size: 14px;font-weight: bold;}\
h1.title {color: #000000;font-size: 44px;padding-bottom: 10px;line-height: 1.15;font-weight: 300;} h2.subtitle {font-size: 13px;font-weight: 700;padding-bottom: 10px;}\
.article-body p{padding-bottom:10px;}div.column-1-3{float: left;display: inline;width: 567px;margin-left: 19px;border-right: 1px solid #CACACA;padding-right: 9px;}\
div.column-1-2 {float: left;display: inline;width: 373px;padding-right: 7px;border-right: 1px solid #CACACA;}\
p.article-image-caption {font-size: 12px;font-weight: 300;line-height: 1.4;color: #616262;margin-top: 5px;} \
p.article-image-caption .credits {font-style: italic;font-size: 10px;}\
div.article-image-caption {width: 246px;margin-bottom: 5px;margin-left: 10px;}\
div.article-image-caption-2column {margin-bottom: 10px;width: 373px;} div.article-image-caption-3column {}\
img {border:0px;} .img-mask {position:absolute;top:0px;left:0px;}'
keep_only_tags = [dict(name='div', attrs={'class':[ 'article-image-caption-2column', 'article-image-caption-3column', 'article-body', 'article-box-fact']}),
dict(name='div', attrs={'id':['date']}),
dict(name='h1', attrs={'class':['title']}),
dict(name='h2', attrs={'class':['subtitle']})]
remove_tags = [dict(name='div', attrs={'class':[ 'metroCommentFormWrap',
'commentForm', 'metroCommentInnerWrap', 'article-slideshow-counter-container', 'article-slideshow-control', 'ad', 'header-links',
'art-rgt','pluck-app pluck-comm', 'share-and-byline', 'article-tools-below-title', 'col-179 ', 'related-links', 'clear padding-top-15', 'share-tools', 'article-page-auto-pushes', 'footer-edit']}),
dict(name='div', attrs={'id':['article-2', 'article-4', 'article-1', 'navigation', 'footer', 'header', 'comments', 'sidebar']}),
dict(name='div', attrs={'id':['article-2', 'article-4', 'article-1', 'navigation', 'footer', 'header', 'comments', 'sidebar', 'share-and-byline']}),
dict(name='iframe')]
preprocess_regexps = [(re.compile(r'(<p>(&nbsp;|\s)*</p>|<a[^>]*>Tweet</a>|<a[^>]*>|</a>|<!--.*?-->)', re.DOTALL|re.IGNORECASE),lambda match: ''),
(re.compile(r'(&nbsp;|\s\s)+\s*', re.DOTALL|re.IGNORECASE),lambda match: ' '),
(re.compile(r'([\s>])([^\s>]+)(<span[^>]+) />', re.DOTALL|re.IGNORECASE),
lambda match: match.group(1) + match.group(3) + '>' + match.group(2) + '</span>'),
]
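# A worked sketch of the third regexp above, which re-wraps a self-closing
# span around the word preceding it (the input string is hypothetical):
#   import re
#   s = ' word<span class="caption" />'
#   print re.sub(r'([\s>])([^\s>]+)(<span[^>]+) />',
#                lambda m: m.group(1) + m.group(3) + '>' + m.group(2) + '</span>', s)
#   # -> ' <span class="caption">word</span>'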
def postprocess_html(self, soup, first):
for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and tag.has_key('src')):
iurl = tag['src']
img = Image()
img.open(iurl)
#width, height = img.size
#print '***img is: ', iurl, '\n****width is: ', width, 'height is: ', height
img.trim(0)
img.save(iurl)
'''
#width, height = img.size
#print '***TRIMMED img width is: ', width, 'height is: ', height
left=0
top=0
border_color='#ffffff'
width, height = img.size
#print '***retrieved img width is: ', width, 'height is: ', height
height_correction = 1.17
canvas = create_canvas(width, height*height_correction,border_color)
canvas.compose(img, left, top)
#img = canvas
canvas.save(iurl)
#width, height = canvas.size
#print '***NEW img width is: ', width, 'height is: ', height
'''
return soup
feeds = [
(u'Binnenland', u'http://www.metronieuws.nl/rss.xml?c=1277377288-3'),
(u'Economie', u'http://www.metronieuws.nl/rss.xml?c=1278070988-0'),
(u'Den Haag', u'http://www.metronieuws.nl/rss.xml?c=1289013337-3'),
(u'Rotterdam', u'http://www.metronieuws.nl/rss.xml?c=1289013337-2'),
(u'Amsterdam', u'http://www.metronieuws.nl/rss.xml?c=1289013337-1'),
(u'Buitenland', u'http://www.metronieuws.nl/rss.xml?c=1277377288-4'),
(u'Columns', u'http://www.metronieuws.nl/rss.xml?c=1277377288-17'),
(u'Entertainment', u'http://www.metronieuws.nl/rss.xml?c=1277377288-2'),
(u'Dot', u'http://www.metronieuws.nl/rss.xml?c=1283166782-12'),

View File

@ -18,10 +18,14 @@ __InclPremium__ = False
__ParsePFF__ = True
# (HK only) Turn below to True if you wish hi-res images (Default: False)
__HiResImg__ = False
# Override the date returned by the program by specifying a YYYYMMDD value below
__Date__ = ''
'''
Change Log:
2011/10/21: fix a bug where hi-res images were unavailable in pages parsed from source txt
2011/10/19: fix a bug in txt source parsing
2011/10/17: disable fetching of premium content, also improved txt source parsing
2011/10/04: option to get hi-res photos for the articles
2011/09/21: fetching "column" section is made optional.
@ -170,13 +174,22 @@ class MPRecipe(BasicNewsRecipe):
return dt_local
def get_fetchdate(self):
return self.get_dtlocal().strftime("%Y%m%d")
if __Date__ <> '':
return __Date__
else:
return self.get_dtlocal().strftime("%Y%m%d")
def get_fetchformatteddate(self):
return self.get_dtlocal().strftime("%Y-%m-%d")
if __Date__ <> '':
return __Date__[0:4]+'-'+__Date__[4:6]+'-'+__Date__[6:8]
else:
return self.get_dtlocal().strftime("%Y-%m-%d")
def get_fetchday(self):
return self.get_dtlocal().strftime("%d")
if __Date__ <> '':
return __Date__[6:8]
else:
return self.get_dtlocal().strftime("%d")
def get_cover_url(self):
if __Region__ == 'Hong Kong':
@ -477,53 +490,8 @@ class MPRecipe(BasicNewsRecipe):
# preprocess those .txt and javascript based files
def preprocess_raw_html(self, raw_html, url):
#raw_html = raw_html.replace(u'<p>\u3010', u'\u3010')
if __HiResImg__ == True:
# TODO: add a _ in front of an image url
if url.rfind('news.mingpao.com') > -1:
imglist = re.findall('src="?.*?jpg"', raw_html)
br = mechanize.Browser()
br.set_handle_redirect(False)
for img in imglist:
gifimg = img.replace('jpg"', 'gif"')
try:
br.open_novisit(url + "/../" + gifimg[5:len(gifimg)-1])
raw_html = raw_html.replace(img, gifimg)
except:
# find the location of the first _
pos = img.find('_')
if pos > -1:
# if found, insert _ after the first _
newimg = img[0:pos] + '_' + img[pos:]
raw_html = raw_html.replace(img, newimg)
else:
# if not found, insert _ after "
raw_html = raw_html.replace(img[1:], '"_' + img[1:])
elif url.rfind('life.mingpao.com') > -1:
imglist = re.findall('src=\'?.*?jpg\'', raw_html)
br = mechanize.Browser()
br.set_handle_redirect(False)
#print 'Img list: ', imglist, '\n'
for img in imglist:
gifimg = img.replace('jpg\'', 'gif\'')
try:
#print 'Original: ', url
#print 'To append: ', "/../" + gifimg[5:len(gifimg)-1]
gifurl = re.sub(r'dailynews.*txt', '', url)
#print 'newurl: ', gifurl + gifimg[5:len(gifimg)-1]
br.open_novisit(gifurl + gifimg[5:len(gifimg)-1])
#print 'URL: ', url + "/../" + gifimg[5:len(gifimg)-1]
#br.open_novisit(url + "/../" + gifimg[5:len(gifimg)-1])
raw_html = raw_html.replace(img, gifimg)
except:
#print 'GIF not found'
pos = img.rfind('/')
newimg = img[0:pos+1] + '_' + img[pos+1:]
#print 'newimg: ', newimg
raw_html = raw_html.replace(img, newimg)
if url.rfind('ftp') == -1 and url.rfind('_print.htm') == -1:
return raw_html
else:
new_html = raw_html
if url.rfind('ftp') <> -1 or url.rfind('_print.htm') <> -1:
if url.rfind('_print.htm') <> -1:
# javascript based file
splitter = re.compile(r'\n')
@ -558,49 +526,114 @@ class MPRecipe(BasicNewsRecipe):
photo = photo.replace('</td>', '<br>')
photo = photo.replace('class="photo"', '')
new_raw_html = new_raw_html + '<div class="images">' + photo + '</div>'
return new_raw_html + '</body></html>'
new_html = new_raw_html + '</body></html>'
else:
# .txt based file
splitter = re.compile(r'\n') # split on newlines
new_raw_html = '<html><head><title>Untitled</title></head><body><div class="images">'
next_is_mov_link = False
next_is_img_txt = False
title_started = False
met_article_start_char = False
for item in splitter.split(raw_html):
item = item.strip()
if item.startswith(u'\u3010'):
met_article_start_char = True
new_raw_html = new_raw_html + '</div><div class="content"><p>' + item + '<p>\n'
else:
if next_is_img_txt == False and next_is_mov_link == False:
item = item.strip()
if next_is_img_txt == False:
if item.startswith("=@"):
next_is_mov_link = True
print 'skip movie link'
elif item.startswith("=?"):
next_is_img_txt = True
new_raw_html += '<img src="' + str(item)[2:].strip() + '.gif" /><p>\n'
elif item.startswith('=='):
next_is_img_txt = True
if False:
# TODO: check existence of .gif first
newimg = '_' + item[2:].strip() + '.jpg'
new_raw_html += '<img src="' + newimg + '" /><p>\n'
else:
new_raw_html += '<img src="' + str(item)[2:].strip() + '.jpg" /><p>\n'
elif item.startswith('='):
next_is_img_txt = True
new_raw_html += '<img src="' + str(item)[1:].strip() + '.jpg" /><p>\n'
if False:
# TODO: check existence of .gif first
newimg = '_' + item[1:].strip() + '.jpg'
new_raw_html += '<img src="' + newimg + '" /><p>\n'
else:
new_raw_html += '<img src="' + str(item)[1:].strip() + '.jpg" /><p>\n'
else:
if item <> '':
if next_is_img_txt == False and met_article_start_char == False:
if next_is_img_txt == False and met_article_start_char == False:
if item <> '':
if title_started == False:
#print 'Title started at ', item
new_raw_html = new_raw_html + '</div><div class="heading">' + item + '\n'
title_started = True
else:
new_raw_html = new_raw_html + item + '\n'
else:
new_raw_html = new_raw_html + item + '<p>\n'
else:
new_raw_html = new_raw_html + item + '<p>\n'
else:
if next_is_mov_link == True:
next_is_mov_link = False
else:
next_is_img_txt = False
new_raw_html = new_raw_html + item + '\n'
return new_raw_html + '</div></body></html>'
next_is_img_txt = False
new_raw_html = new_raw_html + item + '\n'
new_html = new_raw_html + '</div></body></html>'
#raw_html = raw_html.replace(u'<p>\u3010', u'\u3010')
if __HiResImg__ == True:
# TODO: add a _ in front of an image url
if url.rfind('news.mingpao.com') > -1:
imglist = re.findall('src="?.*?jpg"', new_html)
br = mechanize.Browser()
br.set_handle_redirect(False)
for img in imglist:
gifimg = img.replace('jpg"', 'gif"')
try:
br.open_novisit(url + "/../" + gifimg[5:len(gifimg)-1])
new_html = new_html.replace(img, gifimg)
except:
# find the location of the first _
pos = img.find('_')
if pos > -1:
# if found, insert _ after the first _
newimg = img[0:pos] + '_' + img[pos:]
new_html = new_html.replace(img, newimg)
else:
# if not found, insert _ after "
new_html = new_html.replace(img[1:], '"_' + img[1:])
elif url.rfind('life.mingpao.com') > -1:
imglist = re.findall('src=\'?.*?jpg\'', new_html)
br = mechanize.Browser()
br.set_handle_redirect(False)
#print 'Img list: ', imglist, '\n'
for img in imglist:
#print 'Found img: ', img
gifimg = img.replace('jpg\'', 'gif\'')
try:
gifurl = re.sub(r'dailynews.*txt', '', url)
br.open_novisit(gifurl + gifimg[5:len(gifimg)-1])
new_html = new_html.replace(img, gifimg)
except:
pos = img.rfind('/')
newimg = img[0:pos+1] + '_' + img[pos+1:]
new_html = new_html.replace(img, newimg)
# repeat with src quoted by double quotes, for text parsed from src txt
imglist = re.findall('src="?.*?jpg"', new_html)
for img in imglist:
#print 'Found img: ', img
gifimg = img.replace('jpg"', 'gif"')
try:
#print 'url', url
pos = url.rfind('/')
gifurl = url[:pos+1]
#print 'try it:', gifurl + gifimg[5:len(gifimg)-1]
br.open_novisit(gifurl + gifimg[5:len(gifimg)-1])
new_html = new_html.replace(img, gifimg)
except:
pos = img.find('"')
newimg = img[0:pos+1] + '_' + img[pos+1:]
#print 'Use hi-res img', newimg
new_html = new_html.replace(img, newimg)
return new_html
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']

View File

@ -8,7 +8,7 @@ class AdvancedUserRecipe1294342201(BasicNewsRecipe):
title = u'New London Day'
__author__ = 'Being'
description = 'State, local and business news from New London, CT'
language = 'en_GB'
language = 'en'
oldest_article = 1
max_articles_per_feed = 200

View File

@ -10,9 +10,8 @@ class AdvancedUserRecipe1286819935(BasicNewsRecipe):
remove_attributes = ['style']
language = 'ru'
feeds = [(u'Articles', u'http://www.novayagazeta.ru/rss_number.xml')]
feeds = [(u'Articles', u'http://www.novayagazeta.ru/rss/all.xml')]
def print_version(self, url):
return url + '?print=true'
return '%s%s' % (url, '?print=1')

View File

@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
class BasicUserRecipe1318619832(AutomaticNewsRecipe):
from calibre.web.feeds.news import BasicNewsRecipe
class BasicUserRecipe1318619832(BasicNewsRecipe):
title = u'OmgUbuntu'
oldest_article = 7
max_articles_per_feed = 100

View File

@ -0,0 +1,19 @@
from calibre.web.feeds.news import BasicNewsRecipe
class Real_world_economics_review(BasicNewsRecipe):
title = u'Real-world economics review blog'
oldest_article = 7
max_articles_per_feed = 100
use_embedded_content = False
__author__ = 'Julio Map'
language = 'en'
no_stylesheets = True
keep_only_tags = dict(name='div', attrs={'id':'main'})
remove_tags = [dict(name='div', attrs={'id':'postpath'}),
dict(name='div', attrs={'class':'robots-nocontent sd-block sd-social sd-social-icon-text sd-sharing'}),
dict(name='div', attrs={'class':'sharedaddy sd-sharing-enabled'})
]
feeds = [(u'Real-World Economics Review Blog', u'http://rwer.wordpress.com/feed/')]

View File

@ -27,12 +27,12 @@ class ScienceAAS(BasicNewsRecipe):
br = BasicNewsRecipe.get_browser()
if self.username is not None and self.password is not None:
br.open(self.LOGIN)
br.select_form(name='registered_users_form')
br.select_form(nr=1)
br['username'] = self.username
br['code' ] = self.password
br.submit()
return br
keep_only_tags = [ dict(name='div', attrs={'id':'LegacyContent'}) ]
keep_only_tags = [ dict(name='div', attrs={'id':'content-block'}) ]
feeds = [(u"Science: Current Issue", u'http://www.sciencemag.org/rss/current.xml')]

View File

@ -0,0 +1,22 @@
__license__ = 'GPL v3'
__copyright__ = '2011 Neil Grogan'
#
# Silicon Republic Recipe
#
from calibre.web.feeds.news import BasicNewsRecipe
class SiliconRepublic(BasicNewsRecipe):
title = u'Silicon Republic'
oldest_article = 7
max_articles_per_feed = 100
__author__ = u'Neil Grogan'
language = 'en_IE'
remove_tags = [dict(attrs={'class':['thumb','txt','compactbox','icons','catlist','catlistinner','taglist','taglistinner','social','also-in','also-in-inner','also-in-footer','zonek-dfp','paneladvert','rcadvert','panel','h2b']}),
dict(id=['header','logo','header-right','sitesearch','rsslinks','topnav','topvideos','topvideos-list','topnews','topnews-list','slideshow','slides','compactheader','compactnews','compactfeatures','article-type','contactlinks-header','banner-zone-k-dfp','footer-related','directory-services','also-in-section','featuredrelated1','featuredrelated2','featuredrelated3','featuredrelated4','advert2-dfp']),
dict(name=['script', 'style'])]
feeds = [(u'News', u'http://www.siliconrepublic.com/feeds/')]

View File

@ -12,21 +12,18 @@ from calibre.web.feeds.news import BasicNewsRecipe
class DailyTelegraph(BasicNewsRecipe):
title = u'The Australian'
__author__ = u'Matthew Briggs and Sujata Raman'
description = u'National broadsheet newspaper from down under - colloquially known as The Oz'
description = (u'National broadsheet newspaper from down under - colloquially known as The Oz'
'. You will need to have a subscription to '
'http://www.theaustralian.com.au to get full articles.')
language = 'en_AU'
oldest_article = 2
needs_subscription = 'optional'
max_articles_per_feed = 30
remove_javascript = True
no_stylesheets = True
encoding = 'utf8'
html2lrf_options = [
'--comment' , description
, '--category' , 'news, Australia'
, '--publisher' , title
]
keep_only_tags = [dict(name='div', attrs={'id': 'story'})]
#remove_tags = [dict(name=['object','link'])]
@ -67,6 +64,19 @@ class DailyTelegraph(BasicNewsRecipe):
(u'Commercial Property', u'http://feeds.news.com.au/public/rss/2.0/aus_business_commercial_property_708.xml'),
(u'Mining', u'http://feeds.news.com.au/public/rss/2.0/aus_business_mining_704.xml')]
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
if self.username and self.password:
br.open('http://www.theaustralian.com.au')
br.select_form(nr=0)
br['username'] = self.username
br['password'] = self.password
raw = br.submit().read()
if '>log out' not in raw.lower():
raise ValueError('Failed to log in to www.theaustralian.com.au,'
' are your username and password correct?')
return br
def get_article_url(self, article):
return article.id
@ -76,14 +86,4 @@ class DailyTelegraph(BasicNewsRecipe):
#return br.geturl()
def get_cover_url(self):
href = 'http://www.theaustralian.news.com.au/'
soup = self.index_to_soup(href)
img = soup.find('img',alt ="AUS HP promo digital2")
print img
if img :
cover_url = img['src']
return cover_url

View File

@ -1,37 +1,64 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2008 - 2011, Darko Miletic <darko.miletic at gmail.com>'
'''
thescotsman.scotsman.com
www.scotsman.com/the-scotsman
'''
from calibre.web.feeds.news import BasicNewsRecipe
class TheScotsman(BasicNewsRecipe):
title = u'The Scotsman'
title = 'The Scotsman'
__author__ = 'Darko Miletic'
description = 'News from Scotland'
oldest_article = 7
publisher = 'Johnston Publishing Ltd.'
category = 'news, politics, Scotland, UK'
oldest_article = 2
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
language = 'en_GB'
simultaneous_downloads = 1
keep_only_tags = [dict(name='div', attrs={'id':'viewarticle'})]
remove_tags = [
dict(name='div' , attrs={'class':'viewarticlepanel' })
]
language = 'en_GB'
encoding = 'utf-8'
publication_type = 'newspaper'
remove_empty_feeds = True
masthead_url = 'http://www.scotsman.com/webimage/swts_thescotsman_image_e_7_25526!image/3142543874.png_gen/derivatives/default/3142543874.png'
extra_css = 'body{font-family: Arial,Helvetica,sans-serif}'
keep_only_tags = [dict(attrs={'class':'editorialSection'})]
remove_tags_after = dict(attrs={'class':'socialBookmarkPanel'})
remove_tags = [
dict(name=['meta','iframe','object','embed','link']),
dict(attrs={'class':['secondaryArticlesNav','socialBookmarkPanel']}),
dict(attrs={'id':'relatedArticles'})
]
remove_attributes = ['lang']
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
feeds = [
(u'Latest National News', u'http://thescotsman.scotsman.com/getFeed.aspx?Format=rss&sectionid=4068'),
('UK', 'http://thescotsman.scotsman.com/getfeed.aspx?sectionid=7071&format=rss'),
('Scotland', 'http://thescotsman.scotsman.com/getfeed.aspx?sectionid=7042&format=rss'),
('International', 'http://thescotsman.scotsman.com/getfeed.aspx?sectionid=7000&format=rss'),
('Politics', 'http://thescotsman.scotsman.com/getfeed.aspx?sectionid=6990&format=rss'),
('Entertainment', 'http://thescotsman.scotsman.com/getfeed.aspx?sectionid=7010&format=rss'),
('Features', 'http://thescotsman.scotsman.com/getfeed.aspx?sectionid=6996&format=rss'),
('Opinion', 'http://thescotsman.scotsman.com/getfeed.aspx?sectionid=7074&format=rss'),
('Latest News' , 'http://www.scotsman.com/cmlink/1.957140' ),
('UK' , 'http://www.scotsman.com/cmlink/1.957142' ),
('Scotland' , 'http://www.scotsman.com/cmlink/1.957141' ),
('International', 'http://www.scotsman.com/cmlink/1.957143' ),
('Politics' , 'http://www.scotsman.com/cmlink/1.957044' ),
('Arts' , 'http://www.scotsman.com/cmlink/1.1804825'),
('Entertainment', 'http://www.scotsman.com/cmlink/1.957053' ),
('Sports' , 'http://www.scotsman.com/cmlink/1.957151' ),
('Business' , 'http://www.scotsman.com/cmlink/1.957156' ),
('Features' , 'http://www.scotsman.com/cmlink/1.957149' ),
('Opinion' , 'http://www.scotsman.com/cmlink/1.957054' )
]
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
for item in soup.findAll('img'):
if not item.has_key('alt'):
item['alt'] = 'image'
return soup

View File

@ -7,15 +7,15 @@ msgid ""
msgstr ""
"Project-Id-Version: calibre\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-09-02 16:21+0000\n"
"PO-Revision-Date: 2011-09-21 13:48+0000\n"
"Last-Translator: Jellby <Unknown>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-10-22 22:04+0000\n"
"Last-Translator: Fitoschido <fitoschido@gmail.com>\n"
"Language-Team: Spanish <es@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-09-22 04:47+0000\n"
"X-Generator: Launchpad (build 13996)\n"
"X-Launchpad-Export-Date: 2011-10-23 05:13+0000\n"
"X-Generator: Launchpad (build 14170)\n"
#. name for aaa
msgid "Ghotuo"
@ -5911,7 +5911,7 @@ msgstr "Gwahatike"
#. name for dai
msgid "Day"
msgstr "Day"
msgstr "Día"
#. name for daj
msgid "Daju; Dar Fur"
@ -18231,7 +18231,7 @@ msgstr ""
#. name for nhi
msgid "Nahuatl; Zacatlán-Ahuacatlán-Tepetzintla"
msgstr "Náhuatl de Zacatlán; Ahuacatlán y Tepetzintla"
msgstr "Náhuatl de Zacatlán-Ahuacatlán-Tepetzintla"
#. name for nhk
msgid "Nahuatl; Isthmus-Cosoleacaque"

View File

@ -9,49 +9,49 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-09-27 18:23+0000\n"
"Last-Translator: Kovid Goyal <Unknown>\n"
"PO-Revision-Date: 2011-10-15 17:29+0000\n"
"Last-Translator: Devilinside <Unknown>\n"
"Language-Team: Hungarian <debian-l10n-hungarian@lists.d.o>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-09-28 04:50+0000\n"
"X-Generator: Launchpad (build 14049)\n"
"X-Launchpad-Export-Date: 2011-10-16 05:14+0000\n"
"X-Generator: Launchpad (build 14124)\n"
"X-Poedit-Country: HUNGARY\n"
"Language: hu\n"
"X-Poedit-Language: Hungarian\n"
#. name for aaa
msgid "Ghotuo"
msgstr ""
msgstr "Ghotuo"
#. name for aab
msgid "Alumu-Tesu"
msgstr ""
msgstr "Alumu-Tesu"
#. name for aac
msgid "Ari"
msgstr ""
msgstr "Ari"
#. name for aad
msgid "Amal"
msgstr ""
msgstr "Amal"
#. name for aae
msgid "Albanian; Arbëreshë"
msgstr ""
msgstr "Albán; Arbëreshë"
#. name for aaf
msgid "Aranadan"
msgstr ""
msgstr "Aranadan"
#. name for aag
msgid "Ambrak"
msgstr ""
msgstr "Ambrak"
#. name for aah
msgid "Arapesh; Abu'"
msgstr ""
msgstr "Arapesh; Abu'"
#. name for aai
msgid "Arifama-Miniafia"
@ -75,7 +75,7 @@ msgstr ""
#. name for aao
msgid "Arabic; Algerian Saharan"
msgstr ""
msgstr "Arab; Algériai Szaharai"
#. name for aap
msgid "Arára; Pará"
@ -87,7 +87,7 @@ msgstr ""
#. name for aar
msgid "Afar"
msgstr "afar"
msgstr "Afar"
#. name for aas
msgid "Aasáx"
@ -498,10 +498,9 @@ msgstr ""
msgid "Tapei"
msgstr ""
# src/trans.h:281 src/trans.h:318
#. name for afr
msgid "Afrikaans"
msgstr "afrikaans"
msgstr "Afrikaans"
#. name for afs
msgid "Creole; Afro-Seminole"
@ -801,7 +800,7 @@ msgstr ""
#. name for aka
msgid "Akan"
msgstr "akan"
msgstr "Akan"
#. name for akb
msgid "Batak Angkola"
@ -1015,10 +1014,9 @@ msgstr ""
msgid "Amarag"
msgstr ""
# src/trans.h:283
#. name for amh
msgid "Amharic"
msgstr "amhara"
msgstr "Amhara"
#. name for ami
msgid "Amis"
@ -1425,10 +1423,9 @@ msgstr ""
msgid "Arrarnta; Western"
msgstr ""
# src/trans.h:294
#. name for arg
msgid "Aragonese"
msgstr "aragóniai"
msgstr "Aragóniai"
#. name for arh
msgid "Arhuaco"
@ -1548,7 +1545,7 @@ msgstr ""
#. name for asm
msgid "Assamese"
msgstr "asszámi"
msgstr "Asszámi"
#. name for asn
msgid "Asuriní; Xingú"
@ -1790,10 +1787,9 @@ msgstr ""
msgid "Arabic; Uzbeki"
msgstr ""
# src/trans.h:283
#. name for ava
msgid "Avaric"
msgstr "avar"
msgstr "Avar"
#. name for avb
msgid "Avau"
@ -1805,7 +1801,7 @@ msgstr ""
#. name for ave
msgid "Avestan"
msgstr "aveszti"
msgstr "Avesztai"
#. name for avi
msgid "Avikam"
@ -1945,7 +1941,7 @@ msgstr ""
#. name for ayc
msgid "Aymara; Southern"
msgstr ""
msgstr "Ajmara; Déli"
#. name for ayd
msgid "Ayabadhu"
@ -1977,7 +1973,7 @@ msgstr ""
#. name for aym
msgid "Aymara"
msgstr "aymara"
msgstr "Ajmara"
#. name for ayn
msgid "Arabic; Sanaani"
@ -1997,7 +1993,7 @@ msgstr ""
#. name for ayr
msgid "Aymara; Central"
msgstr ""
msgstr "Ajmara; Közép"
#. name for ays
msgid "Ayta; Sorsogon"
@ -2025,12 +2021,11 @@ msgstr ""
#. name for azb
msgid "Azerbaijani; South"
msgstr ""
msgstr "Azeri; Déli"
# src/trans.h:311
#. name for aze
msgid "Azerbaijani"
msgstr "azeri"
msgstr "Azeri"
#. name for azg
msgid "Amuzgo; San Pedro Amuzgos"
@ -2038,7 +2033,7 @@ msgstr ""
#. name for azj
msgid "Azerbaijani; North"
msgstr ""
msgstr "Azeri; Északi"
#. name for azm
msgid "Amuzgo; Ipalapa"
@ -2090,7 +2085,7 @@ msgstr ""
#. name for bak
msgid "Bashkir"
msgstr "baskír"
msgstr "Baskír"
#. name for bal
msgid "Baluchi"
@ -2115,7 +2110,7 @@ msgstr ""
#. name for bar
msgid "Bavarian"
msgstr ""
msgstr "Bajor"
#. name for bas
msgid "Basa (Cameroon)"
@ -2497,10 +2492,9 @@ msgstr "beja"
msgid "Bebeli"
msgstr ""
# src/trans.h:286
#. name for bel
msgid "Belarusian"
msgstr "belorusz"
msgstr "Belarusz"
#. name for bem
msgid "Bemba (Zambia)"
@ -2508,7 +2502,7 @@ msgstr ""
#. name for ben
msgid "Bengali"
msgstr "bengáli"
msgstr "Bengáli"
#. name for beo
msgid "Beami"
@ -3510,10 +3504,9 @@ msgstr ""
msgid "Borôro"
msgstr ""
# src/trans.h:309
#. name for bos
msgid "Bosnian"
msgstr "bosnyák"
msgstr "Bosnyák"
#. name for bot
msgid "Bongo"
@ -3685,7 +3678,7 @@ msgstr ""
#. name for bqn
msgid "Bulgarian Sign Language"
msgstr ""
msgstr "Bolgár jelnyelv"
#. name for bqo
msgid "Balo"
@ -4078,10 +4071,9 @@ msgstr ""
msgid "Bugawac"
msgstr ""
# src/trans.h:285
#. name for bul
msgid "Bulgarian"
msgstr "bolgár"
msgstr "Bolgár"
#. name for bum
msgid "Bulu (Cameroon)"
@ -7445,10 +7437,9 @@ msgstr ""
msgid "Semimi"
msgstr ""
# src/trans.h:284
#. name for eus
msgid "Basque"
msgstr "baszk"
msgstr "Baszk"
#. name for eve
msgid "Even"
@ -7534,10 +7525,9 @@ msgstr ""
msgid "Fang (Equatorial Guinea)"
msgstr ""
# src/trans.h:294
#. name for fao
msgid "Faroese"
msgstr "feröi"
msgstr "Feröeri"
#. name for fap
msgid "Palor"
@ -29414,7 +29404,7 @@ msgstr ""
#. name for xzp
msgid "Zapotec; Ancient"
msgstr ""
msgstr "Zapoték; Ősi"
#. name for yaa
msgid "Yaminahua"
@ -30326,27 +30316,27 @@ msgstr ""
#. name for zaa
msgid "Zapotec; Sierra de Juárez"
msgstr ""
msgstr "Zapoték; Sierra de Juárezi"
#. name for zab
msgid "Zapotec; San Juan Guelavía"
msgstr ""
msgstr "Zapoték; San Juan Guelavíai"
#. name for zac
msgid "Zapotec; Ocotlán"
msgstr ""
msgstr "Zapoték; Ocotláni"
#. name for zad
msgid "Zapotec; Cajonos"
msgstr "zapoték; Cajonos"
msgstr "Zapoték; Cajonesi"
#. name for zae
msgid "Zapotec; Yareni"
msgstr "zapoték; Yareni"
msgstr "Zapoték; Yareni"
#. name for zaf
msgid "Zapotec; Ayoquesco"
msgstr ""
msgstr "Zapoték; Ayoquescoi"
#. name for zag
msgid "Zaghawa"
@ -30358,7 +30348,7 @@ msgstr "zangval"
#. name for zai
msgid "Zapotec; Isthmus"
msgstr "zapoték; Isthmus"
msgstr "Zapoték; Isthmusi"
#. name for zaj
msgid "Zaramo"
@ -30374,31 +30364,31 @@ msgstr "zozu"
#. name for zam
msgid "Zapotec; Miahuatlán"
msgstr ""
msgstr "Zapoték; Miahuatláni"
#. name for zao
msgid "Zapotec; Ozolotepec"
msgstr ""
msgstr "Zapoték; Ozolotepeci"
#. name for zap
msgid "Zapotec"
msgstr "zapoték"
msgstr "Zapoték"
#. name for zaq
msgid "Zapotec; Aloápam"
msgstr ""
msgstr "Zapoték; Aloápami"
#. name for zar
msgid "Zapotec; Rincón"
msgstr "zapoték; Rincón"
msgstr "Zapoték; Rincóni"
#. name for zas
msgid "Zapotec; Santo Domingo Albarradas"
msgstr ""
msgstr "Zapoték; Santo Domingo Albarradasi"
#. name for zat
msgid "Zapotec; Tabaa"
msgstr "zapoték; Tabaa"
msgstr "Zapoték; Tabaa-i"
# src/trans.h:193
#. name for zau
@ -30407,15 +30397,15 @@ msgstr "zangskari"
#. name for zav
msgid "Zapotec; Yatzachi"
msgstr ""
msgstr "Zapoték; Yatzachi-i"
#. name for zaw
msgid "Zapotec; Mitla"
msgstr "zapoték; Mitla"
msgstr "Zapoték; Mitlai"
#. name for zax
msgid "Zapotec; Xadani"
msgstr "zapoték; Xadani"
msgstr "Zapoték; Xadani-i"
#. name for zay
msgid "Zayse-Zergulla"
@ -30991,7 +30981,7 @@ msgstr "tokano"
#. name for zul
msgid "Zulu"
msgstr "zulu"
msgstr "Zulu"
# src/trans.h:316
#. name for zum

View File

@ -10,14 +10,14 @@ msgstr ""
"Report-Msgid-Bugs-To: Debian iso-codes team <pkg-isocodes-"
"devel@lists.alioth.debian.org>\n"
"POT-Creation-Date: 2011-09-27 14:31+0000\n"
"PO-Revision-Date: 2011-09-27 18:36+0000\n"
"Last-Translator: Kovid Goyal <Unknown>\n"
"PO-Revision-Date: 2011-10-25 19:06+0000\n"
"Last-Translator: zeugma <Unknown>\n"
"Language-Team: Turkish <gnome-turk@gnome.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-09-28 05:12+0000\n"
"X-Generator: Launchpad (build 14049)\n"
"X-Launchpad-Export-Date: 2011-10-26 05:13+0000\n"
"X-Generator: Launchpad (build 14189)\n"
"Language: tr\n"
#. name for aaa
@ -54,7 +54,7 @@ msgstr ""
#. name for aai
msgid "Arifama-Miniafia"
msgstr ""
msgstr "Arifama-Miniafia"
#. name for aak
msgid "Ankave"
@ -122,7 +122,7 @@ msgstr "Bankon"
#. name for abc
msgid "Ayta; Ambala"
msgstr ""
msgstr "Ayta; Ambala"
#. name for abd
msgid "Manide"
@ -130,11 +130,11 @@ msgstr "Manide"
#. name for abe
msgid "Abnaki; Western"
msgstr ""
msgstr "Abnaki; Western"
#. name for abf
msgid "Abai Sungai"
msgstr ""
msgstr "Abai Sungai"
#. name for abg
msgid "Abaga"
@ -146,7 +146,7 @@ msgstr "Arapça; Tacikçe"
#. name for abi
msgid "Abidji"
msgstr ""
msgstr "Abidji"
#. name for abj
msgid "Aka-Bea"
@ -158,7 +158,7 @@ msgstr "Abhazca"
#. name for abl
msgid "Lampung Nyo"
msgstr ""
msgstr "Lampung Nyo"
#. name for abm
msgid "Abanyom"
@ -282,7 +282,7 @@ msgstr "Achterhoeks"
#. name for acu
msgid "Achuar-Shiwiar"
msgstr ""
msgstr "Achuar-Shiwiar"
#. name for acv
msgid "Achumawi"

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 22)
numeric_version = (0, 8, 24)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -341,7 +341,9 @@ class DB(object):
if f['is_custom']]
for f in fmvals:
self.create_custom_column(f['label'], f['name'],
f['datatype'], f['is_multiple'] is not None,
f['datatype'],
(f['is_multiple'] is not None and
len(f['is_multiple']) > 0),
f['is_editable'], f['display'])
defs = self.prefs.defaults
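The extra length check matters because is_multiple can be present but empty; a minimal sketch with hypothetical values:

f = {'is_multiple': ''}
print f['is_multiple'] is not None                                # True: the old test wrongly reports a multiple-value column
print f['is_multiple'] is not None and len(f['is_multiple']) > 0  # False: the new test gets it right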

View File

@ -77,7 +77,7 @@ class ANDROID(USBMS):
# Samsung
0x04e8 : { 0x681d : [0x0222, 0x0223, 0x0224, 0x0400],
0x681c : [0x0222, 0x0224, 0x0400],
0x681c : [0x0222, 0x0223, 0x0224, 0x0400],
0x6640 : [0x0100],
0x685b : [0x0400],
0x685e : [0x0400],

View File

@ -0,0 +1,112 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from contextlib import closing
import sqlite3 as sqlite
class Bookmark(): # {{{
'''
A simple class fetching bookmark data
kobo-specific
'''
def __init__(self, db_path, contentid, path, id, book_format, bookmark_extension):
self.book_format = book_format
self.bookmark_extension = bookmark_extension
self.book_length = 0 # Not Used
self.id = id
self.last_read = 0
self.last_read_location = 0 # Not Used
self.path = path
self.timestamp = 0
self.user_notes = None
self.db_path = db_path
self.contentid = contentid
self.percent_read = 0
self.get_bookmark_data()
self.get_book_length() # Not Used
def get_bookmark_data(self):
''' Return the timestamp and last_read_location '''
user_notes = {}
self.timestamp = os.path.getmtime(self.path)
with closing(sqlite.connect(self.db_path)) as connection:
# return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (self.contentid,)
cursor.execute('select bm.bookmarkid, bm.contentid, bm.volumeid, '
'bm.text, bm.annotation, bm.ChapterProgress, '
'bm.StartContainerChildIndex, bm.StartOffset, c.BookTitle, '
'c.TITLE, c.volumeIndex, c.___NumPages '
'from Bookmark bm inner join Content c on '
'bm.contentid = c.contentid and '
'bm.volumeid = ? order by bm.volumeid, bm.chapterprogress', t)
previous_chapter = 0
bm_count = 0
for row in cursor:
current_chapter = row[10]
if previous_chapter == current_chapter:
bm_count = bm_count + 1
else:
bm_count = 0
text = row[3]
annotation = row[4]
# A dog ear (bent upper right corner) is a bookmark
if row[6] == row[7] == 0: # StartContainerChildIndex = StartOffset = 0
e_type = 'Bookmark'
text = row[9]
# highlight is text with no annotation
elif text is not None and (annotation is None or annotation == ""):
e_type = 'Highlight'
elif text and annotation:
e_type = 'Annotation'
else:
e_type = 'Unknown annotation type'
note_id = row[10] + bm_count
chapter_title = row[9]
# book_title = row[8]
chapter_progress = min(round(float(100*row[5]),2),100)
user_notes[note_id] = dict(id=self.id,
displayed_location=note_id,
type=e_type,
text=text,
annotation=annotation,
chapter=row[10],
chapter_title=chapter_title,
chapter_progress=chapter_progress)
previous_chapter = row[10]
# debug_print("e_type:" , e_type, '\t', 'loc: ', note_id, 'text: ', text,
# 'annotation: ', annotation, 'chapter_title: ', chapter_title,
# 'chapter_progress: ', chapter_progress, 'date: ')
cursor.execute('select datelastread, ___PercentRead from content '
'where bookid is Null and '
'contentid = ?', t)
for row in cursor:
self.last_read = row[0]
self.percent_read = row[1]
# print row[1]
cursor.close()
# self.last_read_location = self.last_read - self.pdf_page_offset
self.user_notes = user_notes
def get_book_length(self):
#TL self.book_length = 0
#TL self.book_length = int(unpack('>I', record0[0x04:0x08])[0])
pass
# }}}
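A rough usage sketch of this class (every path and id below is hypothetical):

db_path = '/media/KOBOeReader/.kobo/KoboReader.sqlite'
contentid = 'file:///mnt/onboard/book.epub'
bm = Bookmark(db_path, contentid, '/media/KOBOeReader/book.epub', 42, 'epub', 'epub')
print bm.percent_read, bm.last_read, len(bm.user_notes)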

View File

@ -2,15 +2,16 @@
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Timothy Legge <timlegge at gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__copyright__ = '2010, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import os, time, calendar
import sqlite3 as sqlite
from contextlib import closing
from calibre.devices.usbms.books import BookList
from calibre.devices.kobo.books import Book
from calibre.devices.kobo.books import ImageWrapper
from calibre.devices.kobo.bookmark import Bookmark
from calibre.devices.mime import mime_type_ext
from calibre.devices.usbms.driver import USBMS, debug_print
from calibre import prints
@ -24,7 +25,7 @@ class KOBO(USBMS):
gui_name = 'Kobo Reader'
description = _('Communicate with the Kobo Reader')
author = 'Timothy Legge'
version = (1, 0, 10)
version = (1, 0, 11)
dbversion = 0
fwversion = 0
@ -47,6 +48,7 @@ class KOBO(USBMS):
EBOOK_DIR_MAIN = ''
SUPPORTS_SUB_DIRS = True
SUPPORTS_ANNOTATIONS = True
VIRTUAL_BOOK_EXTENSIONS = frozenset(['kobo'])
@ -77,11 +79,6 @@ class KOBO(USBMS):
self.book_class = Book
self.dbversion = 7
def create_annotations_path(self, mdata, device_path=None):
if device_path:
return device_path
return USBMS.create_annotations_path(self, mdata)
def books(self, oncard=None, end_session=True):
from calibre.ebooks.metadata.meta import path_to_ext
@ -111,6 +108,7 @@ class KOBO(USBMS):
if self.fwversion != '1.0' and self.fwversion != '1.4':
self.has_kepubs = True
debug_print('Version of driver: ', self.version, 'Has kepubs:', self.has_kepubs)
debug_print('Version of firmware: ', self.fwversion, 'Has kepubs:', self.has_kepubs)
self.booklist_class.rebuild_collections = self.rebuild_collections
@ -376,7 +374,7 @@ class KOBO(USBMS):
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
file_endings = (' - iPhoneThumbnail.parsed', ' - bbMediumGridList.parsed', ' - NickelBookCover.parsed', ' - N3_LIBRARY_FULL.parsed', ' - N3_LIBRARY_GRID.parsed', ' - N3_LIBRARY_LIST.parsed', ' - N3_SOCIAL_CURRENTREAD.parsed',)
file_endings = (' - iPhoneThumbnail.parsed', ' - bbMediumGridList.parsed', ' - NickelBookCover.parsed', ' - N3_LIBRARY_FULL.parsed', ' - N3_LIBRARY_GRID.parsed', ' - N3_LIBRARY_LIST.parsed', ' - N3_SOCIAL_CURRENTREAD.parsed', ' - N3_FULL.parsed',)
for ending in file_endings:
fpath = path + ending
@ -852,6 +850,7 @@ class KOBO(USBMS):
' - N3_LIBRARY_FULL.parsed':(355,530),
' - N3_LIBRARY_GRID.parsed':(149,233),
' - N3_LIBRARY_LIST.parsed':(60,90),
' - N3_FULL.parsed':(600,800),
' - N3_SOCIAL_CURRENTREAD.parsed':(120,186)}
for ending, resize in file_endings.items():
@ -892,3 +891,198 @@ class KOBO(USBMS):
tf.write(r.read())
paths[idx] = tf.name
return paths
def create_annotations_path(self, mdata, device_path=None):
if device_path:
return device_path
return USBMS.create_annotations_path(self, mdata)
def get_annotations(self, path_map):
EPUB_FORMATS = [u'epub']
epub_formats = set(EPUB_FORMATS)
def get_storage():
storage = []
if self._main_prefix:
storage.append(os.path.join(self._main_prefix, self.EBOOK_DIR_MAIN))
if self._card_a_prefix:
storage.append(os.path.join(self._card_a_prefix, self.EBOOK_DIR_CARD_A))
if self._card_b_prefix:
storage.append(os.path.join(self._card_b_prefix, self.EBOOK_DIR_CARD_B))
return storage
def resolve_bookmark_paths(storage, path_map):
pop_list = []
book_ext = {}
for id in path_map:
file_fmts = set()
for fmt in path_map[id]['fmts']:
file_fmts.add(fmt)
bookmark_extension = None
if file_fmts.intersection(epub_formats):
book_extension = list(file_fmts.intersection(epub_formats))[0]
bookmark_extension = 'epub'
if bookmark_extension:
for vol in storage:
bkmk_path = path_map[id]['path']
bkmk_path = bkmk_path
if os.path.exists(bkmk_path):
path_map[id] = bkmk_path
book_ext[id] = book_extension
break
else:
pop_list.append(id)
else:
pop_list.append(id)
# Remove non-existent bookmark templates
for id in pop_list:
path_map.pop(id)
return path_map, book_ext
storage = get_storage()
path_map, book_ext = resolve_bookmark_paths(storage, path_map)
bookmarked_books = {}
for id in path_map:
extension = os.path.splitext(path_map[id])[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path_map[id])
ContentID = self.contentid_from_path(path_map[id], ContentType)
bookmark_ext = extension
db_path = self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite')
myBookmark = Bookmark(db_path, ContentID, path_map[id], id, book_ext[id], bookmark_ext)
bookmarked_books[id] = self.UserAnnotation(type='kobo_bookmark', value=myBookmark)
# This returns as job.result in gui2.ui.annotations_fetched(self,job)
return bookmarked_books
    def generate_annotation_html(self, bookmark):
        from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString
        # Returns <div class="user_annotations"> ... </div>
        #last_read_location = bookmark.last_read_location
        #timestamp = bookmark.timestamp
        percent_read = bookmark.percent_read
        debug_print("Date: ", bookmark.last_read)
        if bookmark.last_read is not None:
            try:
                last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S"))))
            except:
                last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S.%f"))))
        else:
            #self.datetime = time.gmtime()
            last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
        # debug_print("Percent read: ", percent_read)

        ka_soup = BeautifulSoup()
        dtc = 0
        divTag = Tag(ka_soup, 'div')
        divTag['class'] = 'user_annotations'

        # Add the last-read location
        spanTag = Tag(ka_soup, 'span')
        spanTag['style'] = 'font-weight:normal'
        if bookmark.book_format == 'epub':
            spanTag.insert(0, NavigableString(
                _("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") %
                    dict(time=last_read,
                    #loc=last_read_location,
                    pr=percent_read)))
        else:
            spanTag.insert(0, NavigableString(
                _("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") %
                    dict(time=last_read,
                    #loc=last_read_location,
                    pr=percent_read)))

        divTag.insert(dtc, spanTag)
        dtc += 1
        divTag.insert(dtc, Tag(ka_soup, 'br'))
        dtc += 1

        if bookmark.user_notes:
            user_notes = bookmark.user_notes
            annotations = []

            # Add the annotations sorted by location
            for location in sorted(user_notes):
                if user_notes[location]['type'] == 'Bookmark':
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br />%(annotation)s<br /><hr />') %
                        dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else ""))
                elif user_notes[location]['type'] == 'Highlight':
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><hr />') %
                        dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            text=user_notes[location]['text']))
                elif user_notes[location]['type'] == 'Annotation':
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') %
                        dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            text=user_notes[location]['text'],
                            annotation=user_notes[location]['annotation']))
                else:
                    annotations.append(
                        _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') %
                        dict(chapter=user_notes[location]['chapter'],
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            chapter_title=user_notes[location]['chapter_title'],
                            chapter_progress=user_notes[location]['chapter_progress'],
                            text=user_notes[location]['text'],
                            annotation=user_notes[location]['annotation']))

            for annotation in annotations:
                divTag.insert(dtc, annotation)
                dtc += 1

        ka_soup.insert(0, divTag)
        return ka_soup
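The nested strftime/strptime chain above is the whole timezone story: KoboReader.sqlite stores last-read times as naive ISO strings, with or without fractional seconds, which the code treats as UTC. A minimal standalone sketch of the same conversion (the function name is illustrative, not part of the driver):

    import calendar
    import time

    def kobo_timestamp_to_local(ts):
        # Kobo stores e.g. '2011-10-27T18:40:01' (UTC, sometimes with '.%f').
        # Parse as UTC, convert to seconds since the epoch, then render in
        # the local timezone for display in the comments field.
        for fmt in ('%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f'):
            try:
                epoch = calendar.timegm(time.strptime(ts, fmt))
                return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch))
            except ValueError:
                continue
        raise ValueError('Unrecognized Kobo timestamp: %r' % ts)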
    def add_annotation_to_library(self, db, db_id, annotation):
        from calibre.ebooks.BeautifulSoup import Tag
        bm = annotation
        ignore_tags = set(['Catalog', 'Clippings'])

        if bm.type == 'kobo_bookmark':
            mi = db.get_metadata(db_id, index_is_id=True)
            user_notes_soup = self.generate_annotation_html(bm.value)
            if mi.comments:
                a_offset = mi.comments.find('<div class="user_annotations">')
                ad_offset = mi.comments.find('<hr class="annotations_divider" />')

                if a_offset >= 0:
                    mi.comments = mi.comments[:a_offset]
                if ad_offset >= 0:
                    mi.comments = mi.comments[:ad_offset]
                if set(mi.tags).intersection(ignore_tags):
                    return
                if mi.comments:
                    hrTag = Tag(user_notes_soup, 'hr')
                    hrTag['class'] = 'annotations_divider'
                    user_notes_soup.insert(0, hrTag)

                mi.comments += unicode(user_notes_soup.prettify())
            else:
                mi.comments = unicode(user_notes_soup.prettify())
            # Update library comments
            db.set_comment(db_id, mi.comments)

            # Add bookmark file to db_id
            db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
                                            bm.value.path, index_is_id=True)
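Note the order of operations above: any previously inserted user_annotations block is truncated from the comments before the fresh one is appended, so fetching annotations repeatedly does not stack duplicates. A condensed sketch of the splice (the helper name is illustrative):

    def splice_annotations(comments, new_block):
        # Drop any earlier annotations block and its divider, then re-append.
        for marker in ('<div class="user_annotations">',
                       '<hr class="annotations_divider" />'):
            offset = comments.find(marker)
            if offset >= 0:
                comments = comments[:offset]
        if comments:
            new_block = '<hr class="annotations_divider" />' + new_block
        return comments + new_block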


@@ -224,7 +224,7 @@ class TREKSTOR(USBMS):
     FORMATS = ['epub', 'txt', 'pdf']
     VENDOR_ID = [0x1e68]
-    PRODUCT_ID = [0x0041, 0x0042,
+    PRODUCT_ID = [0x0041, 0x0042, 0x0052,
             0x003e # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
             ]
     BCD = [0x0002]


@@ -20,9 +20,8 @@ from calibre.devices.usbms.driver import USBMS, debug_print
 from calibre.devices.usbms.device import USBDevice
 from calibre.devices.usbms.books import CollectionsBookList
 from calibre.devices.usbms.books import BookList
-from calibre.ebooks.metadata import authors_to_sort_string
+from calibre.ebooks.metadata import authors_to_sort_string, authors_to_string
 from calibre.constants import islinux
-from calibre.ebooks.metadata import authors_to_string, authors_to_sort_string

 DBPATH = 'Sony_Reader/database/books.db'
 THUMBPATH = 'Sony_Reader/database/cache/books/%s/thumbnail/main_thumbnail.jpg'
@@ -40,7 +39,8 @@ class PRST1(USBMS):
     path_sep = '/'
     booklist_class = CollectionsBookList

-    FORMATS = ['epub', 'pdf', 'txt']
+    FORMATS = ['epub', 'pdf', 'txt', 'book', 'zbf'] # The last two are
+                                                    # used in Japan
     CAN_SET_METADATA = ['collections']
     CAN_DO_DEVICE_DB_PLUGBOARD = True
@@ -112,8 +112,10 @@ class PRST1(USBMS):
     def post_open_callback(self):
         # Set the thumbnail width to the theoretical max if the user has asked
         # that we do not preserve aspect ratio
-        if not self.settings().extra_customization[self.OPT_PRESERVE_ASPECT_RATIO]:
+        ec = self.settings().extra_customization
+        if not ec[self.OPT_PRESERVE_ASPECT_RATIO]:
             self.THUMBNAIL_WIDTH = 108
+        self.WANTS_UPDATED_THUMBNAILS = ec[self.OPT_REFRESH_COVERS]
         # Make sure the date offset is set to none, we'll calculate it in books.
         self.device_offset = None
@@ -186,7 +188,7 @@ class PRST1(USBMS):
         if self.device_offset is None:
             query = 'SELECT file_path, modified_date FROM books'
             cursor.execute(query)
-
+            time_offsets = {}
             for i, row in enumerate(cursor):
                 comp_date = int(os.path.getmtime(self.normalize_path(prefix + row[0])) * 1000);
@@ -194,7 +196,7 @@ class PRST1(USBMS):
                 offset = device_date - comp_date
                 time_offsets.setdefault(offset, 0)
                 time_offsets[offset] = time_offsets[offset] + 1

             try:
                 device_offset = max(time_offsets,key = lambda a: time_offsets.get(a))
                 debug_print("Device Offset: %d ms"%device_offset)
@@ -304,7 +306,7 @@ class PRST1(USBMS):
                 if use_sony_authors:
                     author = newmi.authors[0]
                 else:
-                    author = authors_to_string(newmi.authors)
+                    author = authors_to_string(newmi.authors)
             except:
                 author = _('Unknown')
             title = newmi.title or _('Unknown')
@@ -348,7 +350,7 @@ class PRST1(USBMS):
             if self.is_sony_periodical(book):
                 self.periodicalize_book(connection, book)

         for book, bookId in db_books.items():
             if bookId is not None:
                 # Remove From Collections
@@ -531,7 +533,7 @@ class PRST1(USBMS):
         if book.pubdate.date() < date(2010, 10, 17):
             return False
         return True

     def periodicalize_book(self, connection, book):
         if not self.is_sony_periodical(book):
             return
@@ -555,19 +557,19 @@ class PRST1(USBMS):
             pubdate = int(time.mktime(book.pubdate.timetuple()) * 1000)
         except:
             pass

         cursor = connection.cursor()

         query = '''
            UPDATE books
            SET conforms_to = 'http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0',
                periodical_name = ?,
                description = ?,
-               publication_date = ?
+               publication_date = ?
            WHERE _id = ?
         '''
         t = (name, None, pubdate, book.bookId,)
         cursor.execute(query, t)

         connection.commit()
         cursor.close()


@@ -30,7 +30,7 @@ BOOK_EXTENSIONS = ['lrf', 'rar', 'zip', 'rtf', 'lit', 'txt', 'txtz', 'text', 'ht
                   'html', 'htmlz', 'xhtml', 'pdf', 'pdb', 'pdr', 'prc', 'mobi', 'azw', 'doc',
                   'epub', 'fb2', 'djv', 'djvu', 'lrx', 'cbr', 'cbz', 'cbc', 'oebzip',
                   'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'pmlz', 'mbp', 'tan', 'snb',
-                  'xps', 'oxps', 'azw4']
+                  'xps', 'oxps', 'azw4', 'book', 'zbf']

 class HTMLRenderer(object):


@@ -127,7 +127,7 @@ class FB2Input(InputFormatPlugin):
     def extract_embedded_content(self, doc):
         self.binary_map = {}
         for elem in doc.xpath('./*'):
-            if 'binary' in elem.tag and elem.attrib.has_key('id'):
+            if elem.text and 'binary' in elem.tag and elem.attrib.has_key('id'):
                 ct = elem.get('content-type', '')
                 fname = elem.attrib['id']
                 ext = ct.rpartition('/')[-1].lower()
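The new elem.text check matters because an empty embedded-content tag has a text of None, which would break the decoding of the payload further down. The guard in isolation (a sketch; the base64 decode step is assumed from context, and elem.get stands in for the has_key check):

    import base64

    def decode_embedded_binary(elem):
        # Empty <binary/> tags have elem.text == None; skip them.
        if elem.text and 'binary' in elem.tag and elem.get('id') is not None:
            return base64.b64decode(elem.text)
        return None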


@@ -30,9 +30,11 @@ class Worker(Thread): # Get details {{{
     Get book details from Amazon's book page in a separate thread
     '''

-    def __init__(self, url, result_queue, browser, log, relevance, domain, plugin, timeout=20):
+    def __init__(self, url, result_queue, browser, log, relevance, domain,
+            plugin, timeout=20, testing=False):
         Thread.__init__(self)
         self.daemon = True
+        self.testing = testing
         self.url, self.result_queue = url, result_queue
         self.log, self.timeout = log, timeout
         self.relevance, self.plugin = relevance, plugin
@@ -189,10 +191,9 @@ class Worker(Thread): # Get details {{{
             self.log.exception(msg)
             return

+        oraw = raw
         raw = xml_to_unicode(raw, strip_encoding_pats=True,
                 resolve_entities=True)[0]
-        #open('/t/t.html', 'wb').write(raw)
         if '<title>404 - ' in raw:
             self.log.error('URL malformed: %r'%self.url)
             return
@@ -211,14 +212,20 @@ class Worker(Thread): # Get details {{{
             self.log.error(msg)
             return

-        self.parse_details(root)
+        self.parse_details(oraw, root)

-    def parse_details(self, root):
+    def parse_details(self, raw, root):
         try:
             asin = self.parse_asin(root)
         except:
             self.log.exception('Error parsing asin for url: %r'%self.url)
             asin = None
+        if self.testing:
+            import tempfile
+            with tempfile.NamedTemporaryFile(prefix=asin + '_',
+                    suffix='.html', delete=False) as f:
+                f.write(raw)
+            print ('Downloaded html for', asin, 'saved in', f.name)

         try:
             title = self.parse_title(root)
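Dumping the exact bytes that were parsed is what makes scraper regressions debuggable: when an XPath in one of the parse_* methods stops matching, the saved page can be inspected offline. The pattern in isolation (names here are illustrative):

    import tempfile

    def save_page_for_inspection(raw_bytes, prefix='page_'):
        # delete=False keeps the file around after the test process exits
        with tempfile.NamedTemporaryFile(prefix=prefix, suffix='.html',
                delete=False) as f:
            f.write(raw_bytes)
        return f.name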
@@ -310,7 +317,7 @@ class Worker(Thread): # Get details {{{
             return l.get('href').rpartition('/')[-1]

     def parse_title(self, root):
-        tdiv = root.xpath('//h1[@class="parseasinTitle"]')[0]
+        tdiv = root.xpath('//h1[contains(@class, "parseasinTitle")]')[0]
         actual_title = tdiv.xpath('descendant::*[@id="btAsinTitle"]')
         if actual_title:
             title = tostring(actual_title[0], encoding=unicode,
@@ -320,11 +327,11 @@ class Worker(Thread): # Get details {{{
         return re.sub(r'[(\[].*[)\]]', '', title).strip()

     def parse_authors(self, root):
-        x = '//h1[@class="parseasinTitle"]/following-sibling::span/*[(name()="a" and @href) or (name()="span" and @class="contributorNameTrigger")]'
+        x = '//h1[contains(@class, "parseasinTitle")]/following-sibling::span/*[(name()="a" and @href) or (name()="span" and @class="contributorNameTrigger")]'
         aname = root.xpath(x)
         if not aname:
             aname = root.xpath('''
-                //h1[@class="parseasinTitle"]/following-sibling::*[(name()="a" and @href) or (name()="span" and @class="contributorNameTrigger")]
+                //h1[contains(@class, "parseasinTitle")]/following-sibling::*[(name()="a" and @href) or (name()="span" and @class="contributorNameTrigger")]
                 ''')
         for x in aname:
             x.tail = ''
@@ -666,7 +673,8 @@ class Amazon(Source):
             log.error('No matches found with query: %r'%query)
             return

-        workers = [Worker(url, result_queue, br, log, i, domain, self) for i, url in
+        workers = [Worker(url, result_queue, br, log, i, domain, self,
+            testing=getattr(self, 'running_a_test', False)) for i, url in
                 enumerate(matches)]

         for w in workers:
@@ -740,16 +748,6 @@ if __name__ == '__main__': # tests {{{
             ),

-            ( # An e-book ISBN not on Amazon, the title/author search matches
-                # the Kindle edition, which has different markup for ratings and
-                # isbn
-            {'identifiers':{'isbn': '9780307459671'},
-                'title':'Invisible Gorilla', 'authors':['Christopher Chabris']},
-            [title_test('The Invisible Gorilla: And Other Ways Our Intuitions Deceive Us',
-                exact=True), authors_test(['Christopher Chabris', 'Daniel Simons'])]
-            ),
-
             ( # This isbn not on amazon
             {'identifiers':{'isbn': '8324616489'}, 'title':'Learning Python',
                 'authors':['Lutz']},
@@ -783,7 +781,7 @@ if __name__ == '__main__': # tests {{{
     de_tests = [ # {{{
             (
                 {'identifiers':{'isbn': '3548283519'}},
-                [title_test('Wer Wind sät',
+                [title_test('Wer Wind Sät: Der Fünfte Fall Für Bodenstein Und Kirchhoff',
                 exact=True), authors_test(['Nele Neuhaus'])
                 ]
@@ -835,6 +833,6 @@ if __name__ == '__main__': # tests {{{
     ] # }}}

     test_identify_plugin(Amazon.name, com_tests)
     #test_identify_plugin(Amazon.name, es_tests)
-    #test_identify_plugin(Amazon.name, de_tests)
+    #test_identify_plugin(Amazon.name, de_tests)
 # }}}


@@ -196,6 +196,7 @@ class Source(Plugin):
     def __init__(self, *args, **kwargs):
         Plugin.__init__(self, *args, **kwargs)
+        self.running_a_test = False  # Set to True when using identify_test()
         self._isbn_to_identifier_cache = {}
         self._identifier_to_cover_url_cache = {}
         self.cache_lock = threading.RLock()
@@ -284,14 +285,15 @@ class Source(Plugin):
         if authors:
             # Leave ' in there for Irish names
-            remove_pat = re.compile(r'[,!@#$%^&*(){}`~"\s\[\]/]')
-            replace_pat = re.compile(r'[-+.:;]')
+            remove_pat = re.compile(r'[!@#$%^&*(){}`~"\s\[\]/]')
+            replace_pat = re.compile(r'[-+.:;,]')
             if only_first_author:
                 authors = authors[:1]
             for au in authors:
+                has_comma = ',' in au
                 au = replace_pat.sub(' ', au)
                 parts = au.split()
-                if ',' in au:
+                if has_comma:
                     # au probably in ln, fn form
                     parts = parts[1:] + parts[:1]
                 for tok in parts:
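The has_comma flag has to be captured before replace_pat runs, because the comma that signals "last name, first name" order is now itself a replaced character. A worked example of the reordering:

    au = 'Neuhaus, Nele'
    has_comma = ',' in au            # True, tested on the raw string
    au = 'Neuhaus  Nele'             # result of replace_pat.sub(' ', au)
    parts = au.split()               # ['Neuhaus', 'Nele']
    parts = parts[1:] + parts[:1]    # ['Nele', 'Neuhaus'], fn ln order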


@@ -183,7 +183,11 @@ def test_identify_plugin(name, tests): # {{{
         rq = Queue()
         args = (log, rq, abort)
         start_time = time.time()
-        err = plugin.identify(*args, **kwargs)
+        plugin.running_a_test = True
+        try:
+            err = plugin.identify(*args, **kwargs)
+        finally:
+            plugin.running_a_test = False
         total_time = time.time() - start_time
         times.append(total_time)
         if err is not None:


@@ -138,6 +138,7 @@ class MobiMLizer(object):
         self.mobimlize_elem(body, stylizer, BlockState(nbody),
                 [FormatState()])
         item.data = nroot
+        #print etree.tostring(nroot)

     def mobimlize_font(self, ptsize):
         return self.fnums[self.fmap[ptsize]]
@@ -233,9 +234,19 @@ class MobiMLizer(object):
         elif tag in TABLE_TAGS:
             para.attrib['valign'] = 'top'
         if istate.ids:
-            last = bstate.body[-1]
-            for id in istate.ids:
-                last.addprevious(etree.Element(XHTML('a'), attrib={'id': id}))
+            for id_ in istate.ids:
+                anchor = etree.Element(XHTML('a'), attrib={'id': id_})
+                if tag == 'li':
+                    try:
+                        last = bstate.body[-1][-1]
+                    except:
+                        break
+                    last.insert(0, anchor)
+                    anchor.tail = last.text
+                    last.text = None
+                else:
+                    last = bstate.body[-1]
+                    last.addprevious(anchor)
             istate.ids.clear()
         if not text:
             return
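The li branch changes where the anchor element lands: instead of being emitted before the enclosing list (which made every link to it resolve to the start of the list), the anchor is nested as the first child of the list item's last block, with the item's text moved to the anchor's tail. A toy reproduction of that insertion with lxml:

    from lxml import etree

    li = etree.fromstring('<li>Chapter 2</li>')
    anchor = etree.Element('a', attrib={'id': 'ch2'})
    anchor.tail = li.text   # the item's text now follows the anchor
    li.text = None
    li.insert(0, anchor)
    print(etree.tostring(li))  # <li><a id="ch2"/>Chapter 2</li>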


@@ -66,12 +66,15 @@ class EXTHHeader(object):
                 # last update time
                 pass
             elif id == 503: # Long title
-                if not title or title == _('Unknown') or \
-                        'USER_CONTENT' in title or title.startswith('dtp_'):
-                    try:
-                        title = content.decode(codec)
-                    except:
-                        pass
+                # Amazon seems to regard this as the definitive book title
+                # rather than the title from the PDB header. In fact when
+                # sending MOBI files through Amazon's email service if the
+                # title contains non ASCII chars or non filename safe chars
+                # they are messed up in the PDB header
+                try:
+                    title = content.decode(codec)
+                except:
+                    pass
             #else:
             #    print 'unknown record', id, repr(content)
         if title:


@@ -601,7 +601,7 @@ class MobiWriter(object):
         Write the PalmDB header
         '''
         title = ascii_filename(unicode(self.oeb.metadata.title[0])).replace(
-                ' ', '_')[:32]
+                ' ', '_')[:31]
         title = title + (b'\0' * (32 - len(title)))
         now = int(time.time())
         nrecords = len(self.records)
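The slice change is an off-by-one fix: the PalmDB name field is 32 bytes and must end in a NUL, so a 32-character title left no room for the terminator. A quick check of both variants (Python 2 string semantics, matching the code above):

    t = 'A' * 40                                  # a long ASCII title
    old = t[:32]
    old_field = old + (b'\0' * (32 - len(old)))   # 32 bytes, but no NUL at all
    new = t[:31]
    new_field = new + (b'\0' * (32 - len(new)))   # 32 bytes, NUL terminated
    assert len(old_field) == len(new_field) == 32
    assert b'\0' not in old_field and new_field[-1] == b'\0'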


@@ -74,7 +74,10 @@ class Extract(ODF2XHTML):
             style = style[0]
             css = style.text
             if css:
-                style.text, sel_map = self.do_filter_css(css)
+                css, sel_map = self.do_filter_css(css)
+                if not isinstance(css, unicode):
+                    css = css.decode('utf-8', 'ignore')
+                style.text = css
         for x in root.xpath('//*[@class]'):
             extra = []
             orig = x.get('class')


@@ -20,6 +20,7 @@ from calibre.utils.config import DynamicConfig
 from calibre.utils.logging import Log
 from calibre import guess_type, prints, prepare_string_for_xml
 from calibre.ebooks.oeb.transforms.cover import CoverManager
+from calibre.constants import filesystem_encoding

 TITLEPAGE = CoverManager.SVG_TEMPLATE.decode('utf-8').replace(\
         '__ar__', 'none').replace('__viewbox__', '0 0 600 800'
@@ -180,6 +181,8 @@ class EbookIterator(object):
         self.delete_on_exit = []
         self._tdir = TemporaryDirectory('_ebook_iter')
         self.base = self._tdir.__enter__()
+        if not isinstance(self.base, unicode):
+            self.base = self.base.decode(filesystem_encoding)
         from calibre.ebooks.conversion.plumber import Plumber, create_oebbook
         plumber = Plumber(self.pathtoebook, self.base, self.log)
         plumber.setup_options()


@@ -104,8 +104,9 @@ class RBWriter(object):
         size = len(text)

         pages = []
-        for i in range(0, (len(text) / TEXT_RECORD_SIZE) + 1):
-            pages.append(zlib.compress(text[i * TEXT_RECORD_SIZE : (i * TEXT_RECORD_SIZE) + TEXT_RECORD_SIZE], 9))
+        for i in range(0, (len(text) + TEXT_RECORD_SIZE-1) / TEXT_RECORD_SIZE):
+            zobj = zlib.compressobj(9, zlib.DEFLATED, 13, 8, 0)
+            pages.append(zobj.compress(text[i * TEXT_RECORD_SIZE : (i * TEXT_RECORD_SIZE) + TEXT_RECORD_SIZE]) + zobj.flush())

         return (size, pages)
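Two fixes are folded into this hunk: the loop bound no longer produces an empty trailing record when len(text) is an exact multiple of TEXT_RECORD_SIZE, and each record is now compressed with an explicit 13-bit window via zlib.compressobj(9, zlib.DEFLATED, 13, 8, 0), presumably to stay within the RocketBook's smaller decompression buffer, whereas zlib.compress always uses the default 15-bit window. A round-trip sketch (the record size is illustrative):

    import zlib

    RECORD = 4096  # illustrative; stands in for TEXT_RECORD_SIZE

    def compress_records(text):
        nrecs = (len(text) + RECORD - 1) // RECORD  # ceiling, no empty tail
        pages = []
        for i in range(nrecs):
            zobj = zlib.compressobj(9, zlib.DEFLATED, 13, 8, 0)
            chunk = text[i * RECORD:(i + 1) * RECORD]
            pages.append(zobj.compress(chunk) + zobj.flush())
        return pages

    # Each record decompresses on its own, with the matching window size:
    pages = compress_records(b'x' * 10000)
    assert zlib.decompress(pages[0], 13) == b'x' * RECORD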


@@ -19,6 +19,8 @@ from calibre.ebooks.textile.unsmarten import unsmarten

 class TextileMLizer(OEB2HTML):

+    MAX_EM = 10
+
     def extract_content(self, oeb_book, opts):
         self.log.info('Converting XHTML to Textile formatted TXT...')
         self.opts = opts
@@ -176,7 +178,7 @@ class TextileMLizer(OEB2HTML):
         if 'margin-left' in style.cssdict() and style['margin-left'] != 'auto':
             left_margin_pts = unit_convert(style['margin-left'], style.width, style.fontSize, stylizer.profile.dpi)
         left = left_margin_pts + left_padding_pts
-        emleft = int(round(left / stylizer.profile.fbase))
+        emleft = min(int(round(left / stylizer.profile.fbase)), self.MAX_EM)
         if emleft >= 1:
             txt += '(' * emleft
         right_padding_pts = 0
@@ -186,7 +188,7 @@ class TextileMLizer(OEB2HTML):
         if 'margin-right' in style.cssdict() and style['margin-right'] != 'auto':
             right_margin_pts = unit_convert(style['margin-right'], style.width, style.fontSize, stylizer.profile.dpi)
         right = right_margin_pts + right_padding_pts
-        emright = int(round(right / stylizer.profile.fbase))
+        emright = min(int(round(right / stylizer.profile.fbase)), self.MAX_EM)
         if emright >= 1:
             txt += ')' * emright
@@ -243,7 +245,7 @@ class TextileMLizer(OEB2HTML):
         # Soft scene breaks.
         if 'margin-top' in style.cssdict() and style['margin-top'] != 'auto':
-            ems = int(round(float(style.marginTop) / style.fontSize) - 1)
+            ems = min(int(round(float(style.marginTop) / style.fontSize) - 1), self.MAX_EM)
             if ems >= 1:
                 text.append(u'\n\n\xa0' * ems)
@@ -476,7 +478,7 @@ class TextileMLizer(OEB2HTML):
         # Soft scene breaks.
         if 'margin-bottom' in style.cssdict() and style['margin-bottom'] != 'auto':
-            ems = int(round((float(style.marginBottom) / style.fontSize) - 1))
+            ems = min(int(round((float(style.marginBottom) / style.fontSize) - 1)), self.MAX_EM)
             if ems >= 1:
                 text.append(u'\n\n\xa0' * ems)


@@ -326,6 +326,18 @@ class CoverView(QWidget): # {{{
         if id_ is not None:
             self.cover_removed.emit(id_)

+    def update_tooltip(self, current_path):
+        try:
+            sz = self.pixmap.size()
+        except:
+            sz = QSize(0, 0)
+        self.setToolTip(
+            '<p>'+_('Double-click to open Book Details window') +
+            '<br><br>' + _('Path') + ': ' + current_path +
+            '<br><br>' + _('Cover size: %(width)d x %(height)d')%dict(
+                width=sz.width(), height=sz.height())
+        )
+
 # }}}

 # Book Info {{{
@@ -561,16 +573,7 @@ class BookDetails(QWidget): # {{{

     def update_layout(self):
         self._layout.do_layout(self.rect())
-        try:
-            sz = self.cover_view.pixmap.size()
-        except:
-            sz = QSize(0, 0)
-        self.setToolTip(
-            '<p>'+_('Double-click to open Book Details window') +
-            '<br><br>' + _('Path') + ': ' + self.current_path +
-            '<br><br>' + _('Cover size: %(width)d x %(height)d')%dict(
-                width=sz.width(), height=sz.height())
-        )
+        self.cover_view.update_tooltip(self.current_path)

     def reset_info(self):
         self.show_data(Metadata(_('Unknown')))


@@ -850,15 +850,16 @@ class DeviceMixin(object): # {{{
             self.refresh_ondevice()
             device_signals.device_metadata_available.emit()

-    def refresh_ondevice(self, reset_only = False):
+    def refresh_ondevice(self, reset_only=False):
         '''
         Force the library view to refresh, taking into consideration new
         device books information
         '''
-        self.book_on_device(None, reset=True)
-        if reset_only:
-            return
-        self.library_view.model().refresh_ondevice()
+        with self.library_view.preserve_state():
+            self.book_on_device(None, reset=True)
+            if reset_only:
+                return
+            self.library_view.model().refresh_ondevice()

 # }}}
@@ -888,7 +889,6 @@ class DeviceMixin(object): # {{{
         # if set_books_in_library did not.
         if not self.set_books_in_library(self.booklists(), reset=True, add_as_step_to_job=job):
             self.upload_booklists(job)
-        self.book_on_device(None, reset=True)
         # We need to reset the ondevice flags in the library. Use a big hammer,
         # so we don't need to worry about whether some succeeded or not.
         self.refresh_ondevice(reset_only=False)
@@ -1319,9 +1319,7 @@ class DeviceMixin(object): # {{{
         # If it does not, then do it here.
         if not self.set_books_in_library(self.booklists(), reset=True, add_as_step_to_job=job):
             self.upload_booklists(job)
-        with self.library_view.preserve_selected_books:
-            self.book_on_device(None, reset=True)
-            self.refresh_ondevice()
+        self.refresh_ondevice()

         view = self.card_a_view if on_card == 'carda' else \
                self.card_b_view if on_card == 'cardb' else self.memory_view

@@ -23,24 +23,43 @@ from calibre.gui2.library import DEFAULT_SORT
 from calibre.constants import filesystem_encoding
 from calibre import force_unicode

-class PreserveSelection(object): # {{{
+class PreserveViewState(object): # {{{
     '''
     Save the set of selected books at enter time. If at exit time there are no
-    selected books, restore the previous selection.
+    selected books, restore the previous selection, the previous current index
+    and don't affect the scroll position.
     '''

-    def __init__(self, view):
+    def __init__(self, view, preserve_hpos=True, preserve_vpos=True):
         self.view = view
-        self.selected_ids = []
+        self.selected_ids = set()
+        self.current_id = None
+        self.preserve_hpos = preserve_hpos
+        self.preserve_vpos = preserve_vpos
+        self.vscroll = self.hscroll = 0

     def __enter__(self):
-        self.selected_ids = self.view.get_selected_ids()
+        try:
+            self.selected_ids = self.view.get_selected_ids()
+            self.current_id = self.view.current_id
+            self.vscroll = self.view.verticalScrollBar().value()
+            self.hscroll = self.view.horizontalScrollBar().value()
+        except:
+            import traceback
+            traceback.print_exc()

     def __exit__(self, *args):
         current = self.view.get_selected_ids()
-        if not current:
-            self.view.select_rows(self.selected_ids, using_ids=True)
+        if not current and self.selected_ids:
+            if self.current_id is not None:
+                self.view.current_id = self.current_id
+            self.view.select_rows(self.selected_ids, using_ids=True,
+                    scroll=False, change_current=self.current_id is None)
+            if self.preserve_vpos:
+                self.view.verticalScrollBar().setValue(self.vscroll)
+            if self.preserve_hpos:
+                self.view.horizontalScrollBar().setValue(self.hscroll)
 # }}}
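PreserveViewState is a context manager, so call sites wrap any operation that rebuilds the model; selection, current row and both scroll bars survive the rebuild unless the user selected something new in the meantime. Typical usage, as in refresh_ondevice above:

    with self.library_view.preserve_state():
        self.book_on_device(None, reset=True)
        self.library_view.model().refresh_ondevice()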
class BooksView(QTableView): # {{{
@@ -104,7 +123,7 @@ class BooksView(QTableView): # {{{
         self.setSelectionBehavior(QAbstractItemView.SelectRows)
         self.setSortingEnabled(True)
         self.selectionModel().currentRowChanged.connect(self._model.current_changed)
-        self.preserve_selected_books = PreserveSelection(self)
+        self.preserve_state = partial(PreserveViewState, self)

         # {{{ Column Header setup
         self.can_add_columns = True
@@ -788,6 +807,23 @@ class BooksView(QTableView): # {{{
                 ans.append(i)
         return ans

+    @dynamic_property
+    def current_id(self):
+        def fget(self):
+            try:
+                return self.model().id(self.currentIndex())
+            except:
+                pass
+            return None
+        def fset(self, val):
+            if val is None: return
+            m = self.model()
+            for row in xrange(m.rowCount(QModelIndex())):
+                if m.id(row) == val:
+                    self.set_current_row(row, select=False)
+                    break
+        return property(fget=fget, fset=fset)
+
     def close(self):
         self._model.close()


@@ -30,6 +30,13 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
                    (_('Automatic management'), 'on_connect')]
         r('manage_device_metadata', prefs, choices=choices)

+        if gui.device_manager.is_device_connected:
+            self.opt_manage_device_metadata.setEnabled(False)
+            self.opt_manage_device_metadata.setToolTip(
+                _('Cannot change metadata management while a device is connected'))
+            self.mm_label.setText('Metadata management (disabled while '
+                    'device connected)')
+
         self.send_template.changed_signal.connect(self.changed_signal.emit)


@@ -15,7 +15,7 @@
   </property>
   <layout class="QGridLayout" name="gridLayout">
    <item row="0" column="0">
-    <widget class="QLabel" name="label_4">
+    <widget class="QLabel" name="mm_label">
      <property name="sizePolicy">
       <sizepolicy hsizetype="Maximum" vsizetype="Preferred">
        <horstretch>0</horstretch>


@@ -214,7 +214,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
             fmvals = [f for f in default_prefs['field_metadata'].values() if f['is_custom']]
             for f in fmvals:
                 self.create_custom_column(f['label'], f['name'], f['datatype'],
-                        f['is_multiple'] is not None, f['is_editable'], f['display'])
+                        f['is_multiple'] is not None and len(f['is_multiple']) > 0,
+                        f['is_editable'], f['display'])
         self.initialize_dynamic()

     def get_property(self, idx, index_is_id=False, loc=-1):


@@ -14,7 +14,7 @@ from calibre.constants import iswindows
 import cherrypy

 def start_threaded_server(db, opts):
-    server = LibraryServer(db, opts, embedded=True)
+    server = LibraryServer(db, opts, embedded=True, show_tracebacks=False)
     server.thread = Thread(target=server.start)
     server.thread.setDaemon(True)
     server.thread.start()
@@ -112,7 +112,7 @@ def main(args=sys.argv):
     if opts.with_library is None:
         opts.with_library = prefs['library_path']
     db = LibraryDatabase2(opts.with_library)
-    server = LibraryServer(db, opts)
+    server = LibraryServer(db, opts, show_tracebacks=opts.develop)
     server.start()
     return 0


@@ -243,7 +243,7 @@ Replace ``192.168.1.2`` with the local IP address of the computer running |app|.
 If you get timeout errors while browsing the calibre catalog in Stanza, try increasing the connection timeout value in the stanza settings. Go to Info->Settings and increase the value of Download Timeout.

 .. note::
-    As of iOS version 5 Stanza no longer works on Apple devices. Alternatives to Stanza are discussed `here <http://www.mobileread.com/forums/showthread.php?t=152789>`_.
+    As of iOS version 5 Stanza no longer works on Apple devices. Alternatives to Stanza are discussed `in this forum <http://www.mobileread.com/forums/showthread.php?t=152789>`_.

 Using iBooks

46 file diffs suppressed because they are too large.

Some files were not shown because too many files have changed in this diff.