Merge from trunk

This commit is contained in:
Charles Haley 2011-08-14 08:36:39 +01:00
commit 664673eddc
104 changed files with 63642 additions and 132680 deletions

View File

@ -19,6 +19,53 @@
# new recipes:
# - title:
- version: 0.8.14
date: 2011-08-12
new features:
- title: "Make the keyboard shortcuts used by the main calibre interface user customizable, via Preferences->Advanced->Keyboard"
type: major
- title: "When switching libraries, if the library no longer exists, give the user a chance to specify a new location for the library, in case it was moved, before forgetting it."
tickets: [822018]
- title: "Template language: Add strcat and strlen builtin functions."
tickets: [821935]
bug fixes:
- title: "The various options to control how automerging works when adding books now also apply when copying a book from one library to another."
tickets: [822033]
- title: "Ebook viewer: Respond to key presses even when the book display area does not have keyboard focus"
- title: "Allow integer and float column values to go to -999999. -1000000 is the value of 'undefined'."
tickets: [821941]
- title: "Fix in calibre browser not working for the Open books store in Get Books."
tickets: [822359]
- title: "Fix regression in 0.8.13 that caused incorrect title/author for downloaded news if you turned off reading metadata from file contents in Preferences->Adding books"
- title: "Save to disk: When saving to a single directory, handle the case of the save to disk template containing path separators inside template expression correctly."
tickets: [821912]
- title: "Get Books: Always read metadata from the file contents, ignoring the setting in Preferences->Adding books"
- title: "Fix merge_metadata to not overwrite non-text fields ('bool', 'int', 'float', 'rating', 'datetime') that have a value of zero/false instead of None."
tickets: [821665]
improved recipes:
- The Independent
new recipes:
- title: "Novinite"
author: Martin Tsanchev
- title: "Blog Escrevinhador"
author: Diniz Bortolotto
- version: 0.8.13
date: 2011-08-05
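
Aside: a hedged sketch of the strcat and strlen template builtins added above. These Python definitions mirror the documented behaviour (join all arguments into one string; return the length of the argument); they are illustrative only, not calibre's actual implementation:

    # Minimal sketch, assuming the builtins behave as the changelog describes.
    def strcat(*args):
        # strcat(a, b, ...): returns all arguments concatenated into one string
        return ''.join(args)

    def strlen(a):
        # strlen(a): returns the length of its string argument
        return len(a)

    assert strcat('The ', 'Economist') == 'The Economist'
    assert strlen('The Economist') == 13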

View File

@ -30,8 +30,14 @@ class CnetNews(BasicNewsRecipe):
remove_tags = [
dict(name='div', attrs={'id':'tweetmemeAndFacebook'})
,dict(name='ul', attrs={'class':'contentTools'})
,dict(name='aside', attrs={'id':'filed'})
,dict(name='div', attrs={'class':'postLinks'})
,dict(name='span', attrs={'class':'shareButton'})
,dict(name='span', attrs={'class':'printButton'})
,dict(name='span', attrs={'class':'emailButton'})
,dict(name='div', attrs={'class':'editorBio'})
]
keep_only_tags = dict(name='div', attrs={'class':'txtWrap'})
keep_only_tags = dict(name='div', attrs={'class':'post'})
feeds = [(u'News', u'http://news.cnet.com/2547-1_3-0-20.xml')]

View File

@ -9,7 +9,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag, NavigableString
from collections import OrderedDict
import time, re
import re
class Economist(BasicNewsRecipe):
@ -31,42 +31,33 @@ class Economist(BasicNewsRecipe):
{'class': lambda x: x and 'share-links-header' in x},
]
keep_only_tags = [dict(id='ec-article-body')]
needs_subscription = False
no_stylesheets = True
preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),
lambda x:'</html>')]
# economist.com has started throttling after about 60% of the total has
# downloaded with connection reset by peer (104) errors.
delay = 1
needs_subscription = False
'''
def get_browser(self):
br = BasicNewsRecipe.get_browser()
br.open('http://www.economist.com')
req = mechanize.Request(
'http://www.economist.com/members/members.cfm?act=exec_login',
headers = {
'Referer':'http://www.economist.com/',
},
data=urllib.urlencode({
'logging_in' : 'Y',
'returnURL' : '/',
'email_address': self.username,
'fakepword' : 'Password',
'pword' : self.password,
'x' : '0',
'y' : '0',
}))
br.open(req).read()
if self.username and self.password:
br.open('http://www.economist.com/user/login')
br.select_form(nr=1)
br['name'] = self.username
br['pass'] = self.password
res = br.submit()
raw = res.read()
if '>Log out<' not in raw:
raise ValueError('Failed to login to economist.com. '
'Check your username and password.')
return br
'''
def parse_index(self):
try:
return self.economist_parse_index()
except:
raise
self.log.warn(
'Initial attempt to parse index failed, retrying in 30 seconds')
time.sleep(30)
return self.economist_parse_index()
return self.economist_parse_index()
def economist_parse_index(self):
soup = self.index_to_soup(self.INDEX)

View File

@ -36,27 +36,10 @@ class Economist(BasicNewsRecipe):
preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),
lambda x:'</html>')]
'''
def get_browser(self):
br = BasicNewsRecipe.get_browser()
br.open('http://www.economist.com')
req = mechanize.Request(
'http://www.economist.com/members/members.cfm?act=exec_login',
headers = {
'Referer':'http://www.economist.com/',
},
data=urllib.urlencode({
'logging_in' : 'Y',
'returnURL' : '/',
'email_address': self.username,
'fakepword' : 'Password',
'pword' : self.password,
'x' : '0',
'y' : '0',
}))
br.open(req).read()
return br
'''
# economist.com has started throttling after about 60% of the total has
# downloaded with connection reset by peer (104) errors.
delay = 1
def parse_index(self):
try:

View File

@ -18,7 +18,7 @@ class ElMundo(BasicNewsRecipe):
no_stylesheets = True
use_embedded_content = False
encoding = 'iso8859_15'
language = 'es_ES'
language = 'es'
masthead_url = 'http://estaticos03.elmundo.es/elmundo/iconos/v4.x/v4.01/bg_h1.png'
publication_type = 'newspaper'
extra_css = """

Binary image file added (343 B); contents not shown.

View File

@ -1,70 +1,86 @@
__license__ = 'GPL v3'
__copyright__ = '2011, Darko Miletic <darko.miletic at gmail.com>'
'''
www.independent.co.uk
'''
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
class TheIndependent(BasicNewsRecipe):
title = u'The Independent'
language = 'en_GB'
__author__ = 'Krittika Goyal'
oldest_article = 1 #days
max_articles_per_feed = 30
encoding = 'latin1'
title = 'The Independent'
__author__ = 'Darko Miletic'
description = 'Independent News - Breaking news, comment and features from The Independent newspaper'
publisher = 'The Independent'
category = 'news, politics, UK'
oldest_article = 2
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'cp1252'
use_embedded_content = False
language = 'en_GB'
remove_empty_feeds = True
publication_type = 'newspaper'
masthead_url = 'http://www.independent.co.uk/independent.co.uk/images/logo-london.png'
extra_css = """
h1{font-family: Georgia,serif }
body{font-family: Verdana,Arial,Helvetica,sans-serif}
img{margin-bottom: 0.4em; display:block}
.info,.caption,.credits{font-size: x-small}
"""
no_stylesheets = True
#remove_tags_before = dict(name='h1', attrs={'class':'heading'})
#remove_tags_after = dict(name='td', attrs={'class':'newptool1'})
remove_tags = [
dict(name='iframe'),
dict(name='div', attrs={'class':'related-articles'}),
dict(name='div', attrs={'id':['qrformdiv', 'inSection', 'alpha-inner']}),
dict(name='ul', attrs={'class':'article-tools'}),
dict(name='ul', attrs={'class':'articleTools'}),
]
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
feeds = [
('UK',
'http://www.independent.co.uk/news/uk/rss'),
('World',
'http://www.independent.co.uk/news/world/rss'),
('Business',
'http://www.independent.co.uk/news/business/rss'),
('People',
'http://www.independent.co.uk/news/people/rss'),
('Science',
'http://www.independent.co.uk/news/science/rss'),
('Media',
'http://www.independent.co.uk/news/media/rss'),
('Education',
'http://www.independent.co.uk/news/education/rss'),
('Obituaries',
'http://www.independent.co.uk/news/obituaries/rss'),
remove_tags =[
dict(name=['meta','link','object','embed','iframe','base','style'])
,dict(attrs={'class':['related-articles','share','googleCols','article-tools','paging','googleArt']})
,dict(attrs={'id':['newsVideoPlayer','yahoobook','google-intext']})
]
keep_only_tags =[dict(attrs={'id':'article'})]
remove_attributes=['lang','onclick','width','xmlns:fb']
('Opinion',
'http://www.independent.co.uk/opinion/rss'),
('Environment',
'http://www.independent.co.uk/environment/rss'),
feeds = [
(u'UK' , u'http://www.independent.co.uk/news/uk/rss' )
,(u'World' , u'http://www.independent.co.uk/news/world/rss' )
,(u'Business' , u'http://www.independent.co.uk/news/business/rss' )
,(u'People' , u'http://www.independent.co.uk/news/people/rss' )
,(u'Science' , u'http://www.independent.co.uk/news/science/rss' )
,(u'Media' , u'http://www.independent.co.uk/news/media/rss' )
,(u'Education' , u'http://www.independent.co.uk/news/education/rss' )
,(u'Leading Articles' , u'http://www.independent.co.uk/opinion/leading-articles/rss')
,(u'Commentators'         , u'http://www.independent.co.uk/opinion/commentators/rss'    )
,(u'Columnists' , u'http://www.independent.co.uk/opinion/columnists/rss' )
,(u'Letters' , u'http://www.independent.co.uk/opinion/letters/rss' )
,(u'Big Question' , u'http://www.independent.co.uk/extras/big-question/rss' )
,(u'Sport' , u'http://www.independent.co.uk/sport/rss' )
,(u'Life&Style' , u'http://www.independent.co.uk/life-style/rss' )
,(u'Arts&Entertainment' , u'http://www.independent.co.uk/arts-entertainment/rss' )
,(u'Travel' , u'http://www.independent.co.uk/travel/rss' )
,(u'Money' , u'http://www.independent.co.uk/money/rss' )
]
('Sport',
'http://www.independent.co.uk/sport/rss'),
('Life and Style',
'http://www.independent.co.uk/life-style/rss'),
('Arts and Entertainment',
'http://www.independent.co.uk/arts-entertainment/rss'),
('Travel',
'http://www.independent.co.uk/travel/rss'),
('Money',
'http://www.independent.co.uk/money/rss'),
]
def get_article_url(self, article):
return article.get('guid', None)
def preprocess_html(self, soup):
story = soup.find(name='div', attrs={'id':'mainColumn'})
#td = heading.findParent(name='td')
#td.extract()
soup = BeautifulSoup('<html><head><title>t</title></head><body></body></html>')
body = soup.find(name='body')
body.insert(0, story)
return soup
for item in soup.body.findAll(style=True):
del item['style']
for item in soup.body.findAll(['author','preform']):
item.name='span'
for item in soup.body.findAll('img'):
if not item.has_key('alt'):
item['alt'] = 'image'
for item in soup.body.findAll('div', attrs={'class':['clear-o','body','photoCaption']}):
item.name = 'p'
for item in soup.body.findAll('div'):
if not item.attrs and not item.contents:
item.extract()
soup2 = BeautifulSoup('<html><head><title>t</title></head><body></body></html>')
soup2.body.replaceWith(soup.body)
return soup2

View File

@ -1,55 +1,46 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008-2009, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2011, Oscar Megia Lopez'
'''
juventudrebelde.cu
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class JuventudRebelde(BasicNewsRecipe):
title = u'Juventud Rebelde'
__author__ = 'Oscar Megia Lopez'
description = 'Periodico cubano'
oldest_article = 30
max_articles_per_feed = 100
no_stylesheets = True
#delay = 1
use_embedded_content = False
encoding = 'utf8'
publisher = 'Juventud Rebelde'
category = 'Noticias'
language = 'es'
publication_type = 'Periodico'
extra_css = ' body{ font-family: Verdana,Helvetica,Arial,sans-serif } .title{font-weight: bold} .read{display: block; padding: 0; border: 1px solid; width: 40%; font-size: small} .story-feature h2{text-align: center; text-transform: uppercase} '
preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
,'linearize_tables': True
}
class Juventudrebelde(BasicNewsRecipe):
title = 'Juventud Rebelde'
__author__ = 'Darko Miletic'
description = 'Diario de la Juventud Cubana'
publisher = 'Juventud rebelde'
category = 'news, politics, Cuba'
oldest_article = 2
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
encoding = 'cp1252'
language = 'es_CU'
cover_url = strftime('http://www.juventudrebelde.cu/UserFiles/File/impreso/iportada-%Y-%m-%d.jpg')
remove_javascript = True
html2lrf_options = [
'--comment' , description
, '--category' , category
, '--publisher', publisher
, '--ignore-tables'
keep_only_tags = [
dict(name='div', attrs={'class':['title']})
,dict(attrs={'class':['read']})
,dict(attrs={'class':['author']})
]
html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"\nlinearize_tables=True'
remove_tags = [
dict(name='div', attrs={'class':['share']}),
]
keep_only_tags = [dict(name='div', attrs={'id':'noticia'})]
remove_attributes = ['width','height']
feeds = [
(u'Generales', u'http://www.juventudrebelde.cu/rss/generales.php' )
,(u'Cuba', u'http://www.juventudrebelde.cu/rss/generales.php?seccion=cuba' )
,(u'Internacionales', u'http://www.juventudrebelde.cu/rss/generales.php?seccion=internacionales' )
,(u'Opinion', u'http://www.juventudrebelde.cu/rss/generales.php?seccion=opinion' )
,(u'Cultura', u'http://www.juventudrebelde.cu/rss/generales.php?seccion=cultura' )
,(u'Deportes', u'http://www.juventudrebelde.cu/rss/generales.php?seccion=deportes' )
,(u'Lectura', u'http://www.juventudrebelde.cu/rss/generales.php?seccion=lectura' )
]
def preprocess_html(self, soup):
mtag = '<meta http-equiv="Content-Language" content="es-CU"/>'
soup.head.insert(0,mtag)
for item in soup.findAll(style=True):
del item['style']
return soup
feeds = [
    (u'Generales', u'http://www.juventudrebelde.cu/get/rss/grupo/generales/'),
    # The original pointed 'Internacionales' at a psychologytoday.com feed,
    # evidently a paste error; the URL below is an assumption based on the
    # /get/rss/noticias/<section>/ pattern used by the other feeds.
    (u'Internacionales', u'http://www.juventudrebelde.cu/get/rss/noticias/internacionales/'),
    (u'Ciencia y Tecnica', u'http://www.juventudrebelde.cu/get/rss/noticias/ciencia-tecnica/'),
    (u'Opini\xf3n', u'http://www.juventudrebelde.cu/get/rss/noticias/opinion/'),
    (u'Cuba', u'http://www.juventudrebelde.cu/get/rss/noticias/cuba/'),
    (u'Cultura', u'http://www.juventudrebelde.cu/get/rss/noticias/cultura/'),
    (u'Deportes', u'http://www.juventudrebelde.cu/get/rss/noticias/deportes'),
]

View File

@ -26,7 +26,7 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
use_embedded_content = False
encoding = 'utf-8'
language = 'es_ES'
language = 'es'
timefmt = '[%a, %d %b, %Y]'
keep_only_tags = [

View File

@ -17,18 +17,15 @@ class Lanacion(BasicNewsRecipe):
use_embedded_content = False
no_stylesheets = True
language = 'es_AR'
delay = 14
publication_type = 'newspaper'
remove_empty_feeds = True
masthead_url = 'http://www.lanacion.com.ar/_ui/desktop/imgs/layout/logos/ln341x47.gif'
masthead_url = 'http://www.lanacion.com.ar/_ui/desktop/imgs/layout/logos/ln-home.gif'
extra_css = """
h1{font-family: Georgia,serif}
h2{color: #626262; font-weight: normal; font-size: 1.1em}
h1{font-family: TheSans,Arial,sans-serif}
body{font-family: Arial,sans-serif}
img{margin-top: 0.5em; margin-bottom: 0.2em; display: block}
.notaFecha{color: #808080; font-size: small}
.notaEpigrafe{font-size: x-small}
.topNota h1{font-family: Arial,sans-serif}
img{display: block}
.firma,.fecha{font-size: small}
.epigrafe-columna{font-size: x-small}
"""
@ -39,21 +36,13 @@ class Lanacion(BasicNewsRecipe):
, 'language' : language
}
keep_only_tags = [
dict(name='div', attrs={'class':['topNota','itemHeader','nota','itemBody']})
,dict(name='div', attrs={'id':'content'})
]
remove_tags = [
dict(name='div' , attrs={'class':'notaComentario floatFix noprint' })
,dict(name='ul' , attrs={'class':['cajaHerramientas cajaTop noprint','herramientas noprint']})
,dict(name='div' , attrs={'class':['titulosMultimedia','herramientas noprint','cajaHerramientas noprint','cajaHerramientas floatFix'] })
,dict(attrs={'class':['izquierda','espacio17','espacio10','espacio20','floatFix ultimasNoticias','relacionadas','titulosMultimedia','derecha','techo color','encuesta','izquierda compartir','floatFix','videoCentro']})
,dict(name=['iframe','embed','object','form','base','hr','meta','link','input'])
dict(name=['iframe','embed','object','meta','link'])
,dict(attrs={'id':['herramientas','relacionadas','ampliar']})
]
remove_tags_after = dict(attrs={'class':['tags','nota-destacado']})
remove_attributes = ['height','width','visible','onclick','data-count','name']
remove_tags_before = dict(attrs={'id':'encabezado'})
remove_tags_after = dict(attrs={'id':'relacionadas'})
feeds = [
(u'Politica' , u'http://servicios.lanacion.com.ar/herramientas/rss/categoria_id=30' )
@ -92,6 +81,15 @@ class Lanacion(BasicNewsRecipe):
return None
return link
def get_cover_url(self):
soup = self.index_to_soup('http://www.lanacion.com.ar/edicion-impresa')
atap = soup.find(attrs={'class':'tapa'})
if atap:
li = atap.find('img')
if li:
return li['src']
return None
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']

View File

@ -23,7 +23,7 @@ class LaTribuna(BasicNewsRecipe):
encoding = 'utf-8'
language = 'es_HN'
lang = 'es-HN'
lang = 'es_HN'
direction = 'ltr'
html2lrf_options = [

View File

@ -19,7 +19,7 @@ class Marca(BasicNewsRecipe):
use_embedded_content = False
delay = 1
encoding = 'iso-8859-15'
language = 'es_ES'
language = 'es'
publication_type = 'newsportal'
masthead_url = 'http://estaticos.marca.com/deporte/img/v3.0/img_marca-com.png'
extra_css = """

View File

@ -1,91 +1,135 @@
# -*- coding: utf-8 -*-
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, matek09, matek09@gmail.com'
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ptempfile import PersistentTemporaryFile
import datetime
class Newsweek(BasicNewsRecipe):
FIND_LAST_FULL_ISSUE = True
EDITION = '0'
EXCLUDE_LOCKED = True
LOCKED_ICO = 'http://www.newsweek.pl/bins/media/static/newsweek/img/ico_locked.gif'
DATE = None
YEAR = datetime.datetime.now().year
title = u'Newsweek Polska'
__author__ = 'matek09'
description = 'Weekly magazine'
encoding = 'utf-8'
no_stylesheets = True
language = 'pl'
remove_javascript = True
keep_only_tags =[]
keep_only_tags.append(dict(name = 'div', attrs = {'class' : 'article'}))
temp_files = []
articles_are_obfuscated = True
remove_tags =[]
remove_tags.append(dict(name = 'div', attrs = {'class' : 'copy'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'url'}))
extra_css = '''
.body {font-size: small}
.author {font-size: x-small}
.lead {font-size: x-small}
.title{font-size: x-large; font-weight: bold}
'''
def get_obfuscated_article(self, url):
br = self.get_browser()
br.open(url)
source = br.response().read()
page = self.index_to_soup(source)
def print_version(self, url):
return url.replace("http://www.newsweek.pl/artykuly/wydanie/" + str(self.EDITION), "http://www.newsweek.pl/artykuly") + '/print'
main_section = page.find(id='mainSection')
def is_locked(self, a):
if a.findNext('img')['src'] == 'http://www.newsweek.pl/bins/media/static/newsweek/img/ico_locked.gif':
return True
else:
return False
title = main_section.find('h1')
info = main_section.find('ul', attrs={'class' : 'articleInfo'})
authors = info.find('li').find('h4')
article = main_section.find('div', attrs={'id' : 'article'})
html = unicode(title) + unicode(authors) + unicode(article)
next = main_section.find('li', attrs={'class' : 'next'})
while next:
url = next.find('a')['href']
br.open(url)
source = br.response().read()
page = self.index_to_soup(source)
main_section = page.find(id='mainSection')
article = main_section.find('div', attrs={'id' : 'article'})
aside = article.find(id='articleAside')
if aside is not None:
aside.extract()
html = html + unicode(article)
next = main_section.find('li', attrs={'class' : 'next'})
self.temp_files.append(PersistentTemporaryFile('_temparse.html'))
self.temp_files[-1].write(html)
self.temp_files[-1].close()
return self.temp_files[-1].name
def is_full(self, issue_soup):
if len(issue_soup.findAll('img', attrs={'src' : 'http://www.newsweek.pl/bins/media/static/newsweek/img/ico_locked.gif'})) > 1:
return False
else:
return True
def find_last_full_issue(self):
frame_url = 'http://www.newsweek.pl/Frames/IssueCover.aspx'
while True:
frame_soup = self.index_to_soup(frame_url)
self.EDITION = frame_soup.find('a', attrs={'target' : '_parent'})['href'].replace('/wydania/','')
main_section = issue_soup.find(id='mainSection')
next = main_section.find('li', attrs={'class' : 'next'})
if len(main_section.findAll(attrs={'class' : 'locked'})) > 1:
return False
elif next is None:
return True
else:
issue_soup = self.index_to_soup(next.find('a')['href'])
def find_last_full_issue(self, archive_url):
archive_soup = self.index_to_soup(archive_url)
select = archive_soup.find('select', attrs={'id' : 'paper_issue_select'})
for option in select.findAll(lambda tag: tag.name == 'option' and tag.has_key('value')):
self.EDITION = option['value'].replace('http://www.newsweek.pl/wydania/','')
issue_soup = self.index_to_soup('http://www.newsweek.pl/wydania/' + self.EDITION)
if self.is_full(issue_soup):
break
frame_url = 'http://www.newsweek.pl/Frames/' + frame_soup.find(lambda tag: tag.name == 'span' and not tag.attrs).a['href']
return
self.YEAR = self.YEAR - 1
self.find_last_full_issue(archive_url + ',' + str(self.YEAR))
def parse_index(self):
if self.FIND_LAST_FULL_ISSUE:
self.find_last_full_issue()
archive_url = 'http://www.newsweek.pl/wydania/archiwum'
self.find_last_full_issue(archive_url)
soup = self.index_to_soup('http://www.newsweek.pl/wydania/' + self.EDITION)
img = soup.find('img', id="ctl00_C1_PaperIsssueView_IssueImage", src=True)
self.DATE = self.tag_to_string(soup.find('span', attrs={'class' : 'data'}))
main_section = soup.find(id='mainSection')
img = main_section.find(lambda tag: tag.name == 'img' and tag.has_key('alt') and tag.has_key('title'))
self.cover_url = img['src']
feeds = []
parent = soup.find(id='content-left-big')
for txt in parent.findAll(attrs={'class':'txt_normal_red strong'}):
articles = list(self.find_articles(txt))
if len(articles) > 0:
section = self.tag_to_string(txt).capitalize()
feeds.append((section, articles))
articles = {}
sections = []
while True:
news_list = main_section.find('ul', attrs={'class' : 'newsList'})
for h2 in news_list.findAll('h2'):
article = self.create_article(h2)
category_div = h2.findNext('div', attrs={'class' : 'kategorie'})
section = self.tag_to_string(category_div)
if articles.has_key(section):
articles[section].append(article)
else:
articles[section] = [article]
sections.append(section)
next = main_section.find('li', attrs={'class' : 'next'})
if next is None:
break
soup = self.index_to_soup(next.find('a')['href'])
main_section = soup.find(id='mainSection')
for section in sections:
feeds.append((section, articles[section]))
return feeds
def find_articles(self, txt):
for a in txt.findAllNext( attrs={'class':['strong','hr']}):
if a.name in "div":
break
if (not self.FIND_LAST_FULL_ISSUE) & self.EXCLUDE_LOCKED & self.is_locked(a):
continue
yield {
'title' : self.tag_to_string(a),
'url' : 'http://www.newsweek.pl' + a['href'],
'date' : '',
'description' : ''
}
def create_article(self, h2):
article = {}
a = h2.find('a')
article['title'] = self.tag_to_string(a)
article['url'] = a['href']
article['date'] = self.DATE
desc = h2.findNext('p')
if desc is not None:
article['description'] = self.tag_to_string(desc)
else:
article['description'] = ''
return article

recipes/novinite.recipe (new file, 35 lines)
View File

@ -0,0 +1,35 @@
from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1308572538(BasicNewsRecipe):
title = u'Novinite.com'
__author__ = 'Martin Tsanchev'
description = 'Real time provider of the latest Bulgarian news in English'
category = 'Business, Politics, Society, Sports, Crime, Lifestyle, World, People'
language = 'en_BG'
encoding = 'utf-8'
oldest_article = 7
max_articles_per_feed = 10
keep_only_tags = [dict(name='div', attrs={'id':'content'})]
remove_tags = [dict(name='a', attrs={'class':'twitter-share-button'})]
remove_tags_after = dict(id='textsize')
no_stylesheets = True
feeds = [(u'Business', u'http://www.novinite.com/services/news_rdf.php?category_id=1'),
(u'Finance', u'http://www.novinite.com/services/news_rdf.php?category_id=15'),
(u'Energy', u'http://www.novinite.com/services/news_rdf.php?category_id=16'),
(u'Industry', u'http://www.novinite.com/services/news_rdf.php?category_id=17'),
(u'Properties', u'http://www.novinite.com/services/news_rdf.php?category_id=18'),
(u'Politics', u'http://www.novinite.com/services/news_rdf.php?category_id=2'),
(u'Diplomacy', u'http://www.novinite.com/services/news_rdf.php?category_id=20'),
(u'Defense', u'http://www.novinite.com/services/news_rdf.php?category_id=21'),
(u'Bulgaria in EU', u'http://www.novinite.com/services/news_rdf.php?category_id=22'),
(u'Domestic', u'http://www.novinite.com/services/news_rdf.php?category_id=23'),
(u'Society', u'http://www.novinite.com/services/news_rdf.php?category_id=3'),
(u'Environment', u'http://www.novinite.com/services/news_rdf.php?category_id=24'),
(u'Education', u'http://www.novinite.com/services/news_rdf.php?category_id=25'),
(u'Culture', u'http://www.novinite.com/services/news_rdf.php?category_id=26'),
(u'Archaeology', u'http://www.novinite.com/services/news_rdf.php?category_id=34'),
(u'Health', u'http://www.novinite.com/services/news_rdf.php?category_id=62'),
(u'Sports', u'http://www.novinite.com/services/news_rdf.php?category_id=4'),
(u'Crime', u'http://www.novinite.com/services/news_rdf.php?category_id=5'),
(u'Lifestyle', u'http://www.novinite.com/services/news_rdf.php?category_id=6'),
(u'World', u'http://www.novinite.com/services/news_rdf.php?category_id=30')]

View File

@ -0,0 +1,43 @@
__license__ = 'GPL v3'
__copyright__ = '2011, Oscar Megia Lopez'
'''
perezreverte.com
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
class PerezReverte(BasicNewsRecipe):
title = u'Patente de Corso'
__author__ = 'Oscar Megia Lopez'
description = 'Arturo Perez Reverte'
oldest_article = 90
max_articles_per_feed = 100
no_stylesheets = True
#delay = 1
use_embedded_content = False
encoding = 'utf8'
publisher = 'Arturo Perez Reverte'
category = 'Articulo'
language = 'es'
publication_type = 'Magazine'
extra_css = ' body{ font-family: Verdana,Helvetica,Arial,sans-serif } .contentheading{font-weight: bold} .txt_articulo{display: block; padding: 0; border: 1px solid; width: 40%; font-size: small} .story-feature h2{text-align: center; text-transform: uppercase} '
preprocess_regexps = [(re.compile(r'<!--.*?-->', re.DOTALL), lambda m: '')]
conversion_options = {
'comments' : description
,'tags' : category
,'language' : language
,'publisher' : publisher
,'linearize_tables': True
}
keep_only_tags = [
dict(name='h2', attrs={'class':['titular']}),
dict(name='p', attrs={'class':['fecha']}),
dict(name='div', attrs={'class':['bloqueTexto']})
]
remove_attributes = ['width','height']
feeds = [
('Patente de corso - Web oficial de Arturo Perez Reverte', 'http://www.perezreverte.com/rss/patentes-corso/')
]

View File

@ -1,12 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe
class RzeczpospolitaRecipe(BasicNewsRecipe):
__license__ = 'GPL v3'
__license__ = 'GPL v3'
__author__ = u'kwetal and Tomasz Dlugosz'
language = 'pl'
version = 1
title = u'Rzeczpospolita OnLine'
title = u'Rzeczpospolita OnLine'
publisher = u'Presspublica Sp.'
category = u'News'
description = u'Newspaper'
@ -31,15 +31,19 @@ class RzeczpospolitaRecipe(BasicNewsRecipe):
feeds.append(u'http://www.rp.pl/rss/8.html')
keep_only_tags =[]
keep_only_tags.append(dict(name = 'div', attrs = {'id' : 'storyp'}))
keep_only_tags.append(dict(name = 'div', attrs = {'id' : 'story'}))
remove_tags =[]
remove_tags.append(dict(name = 'div', attrs = {'id' : 'adk_0'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'socialTools'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'articleToolBoxTop'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'clr'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'share_bottom'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'copyright_law'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'recommendations'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'editorPicks'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'articleCopyrightText'}))
remove_tags.append(dict(name = 'div', attrs = {'id' : 'articleCopyrightButton'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'articleToolBoxBottom'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'more'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'editorPicks'}))
remove_tags.append(dict(name = 'div', attrs = {'class' : 'addRecommendation'}))
extra_css = '''
body {font-family: verdana, arial, helvetica, geneva, sans-serif ;}
@ -62,3 +66,4 @@ class RzeczpospolitaRecipe(BasicNewsRecipe):
forget, sep, index = rest.rpartition(',')
return start + '/' + index + '?print=tak'

View File

@ -179,6 +179,9 @@ save_template_title_series_sorting = 'library_order'
# changed. Changes to this tweak won't have an effect until the book is modified
# in some way. If you enter an invalid pattern, it is silently ignored.
# To disable use the expression: '^$'
# This expression is designed for articles that are followed by spaces. If you
# also need to match articles that are followed by other characters, for example L'
# in French, use: r"^(A\s+|The\s+|An\s+|L')" instead.
# Default: '^(A|The|An)\s+'
title_sort_articles=r'^(A|The|An)\s+'
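
A hedged illustration of how a pattern like the tweak above yields a sort key; the helper name is made up for this demo, and calibre's real title-sort code is not shown here:

    import re

    # The default pattern plus the French L' form suggested above.
    # Case-insensitive matching is an assumption for this demo.
    ARTICLES = re.compile(r"^(A\s+|The\s+|An\s+|L')", re.IGNORECASE)

    def title_sort_key(title):
        # Strip one leading article so 'The Count of Monte Cristo'
        # files under 'C' rather than 'T'.
        return ARTICLES.sub('', title).strip()

    print(title_sort_key('The Count of Monte Cristo'))  # Count of Monte Cristo
    print(title_sort_key("L'Etranger"))                 # Etranger

Disabling the tweak via '^$' works because that pattern never matches a non-empty prefix, so no title is altered.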

View File

@ -21,3 +21,5 @@ vipy.session.add_content_browser('.r', ',r', 'Recipe',
vipy.session.glob_based_iterator(os.path.join(project_dir, 'recipes', '*.recipe')),
vipy.session.regexp_based_matcher(r'title\s*=\s*(?P<title>.+)', 'title', recipe_title_callback))
EOFPY
nmap \log :enew<CR>:read ! bzr log -l 500 ../.. <CR>:e ../../Changelog.yaml<CR>:e constants.py<CR>

setup/iso639.xml (new file, 2169 lines)

File diff suppressed because it is too large.

View File

@ -273,10 +273,9 @@ class GetTranslations(Translations):
class ISO639(Command):
description = 'Compile translations for ISO 639 codes'
XML = '/usr/lib/python2.7/site-packages/pycountry/databases/iso639.xml'
def run(self, opts):
src = self.XML
src = self.j(self.d(self.SRC), 'setup', 'iso639.xml')
if not os.path.exists(src):
raise Exception(src + ' does not exist')
dest = self.j(self.d(self.SRC), 'resources', 'localization',
@ -290,20 +289,27 @@ class ISO639(Command):
by_2 = {}
by_3b = {}
by_3t = {}
m2to3 = {}
m3to2 = {}
codes2, codes3t, codes3b = set([]), set([]), set([])
for x in root.xpath('//iso_639_entry'):
name = x.get('name')
two = x.get('iso_639_1_code', None)
threeb = x.get('iso_639_2B_code')
threet = x.get('iso_639_2T_code')
if two is not None:
by_2[two] = name
codes2.add(two)
by_3b[x.get('iso_639_2B_code')] = name
by_3t[x.get('iso_639_2T_code')] = name
m2to3[two] = threet
m3to2[threeb] = m3to2[threet] = two
by_3b[threeb] = name
by_3t[threet] = name
codes3b.add(x.get('iso_639_2B_code'))
codes3t.add(x.get('iso_639_2T_code'))
from cPickle import dump
x = {'by_2':by_2, 'by_3b':by_3b, 'by_3t':by_3t, 'codes2':codes2,
'codes3b':codes3b, 'codes3t':codes3t}
'codes3b':codes3b, 'codes3t':codes3t, '2to3':m2to3,
'3to2':m3to2}
dump(x, open(dest, 'wb'), -1)

View File

@ -4,7 +4,7 @@ __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
__appname__ = u'calibre'
numeric_version = (0, 8, 13)
numeric_version = (0, 8, 14)
__version__ = u'.'.join(map(unicode, numeric_version))
__author__ = u"Kovid Goyal <kovid@kovidgoyal.net>"

View File

@ -48,7 +48,7 @@ class ANDROID(USBMS):
# Google
0x18d1 : {
0x0001 : [0x0223],
0x0001 : [0x0223, 0x9999],
0x4e11 : [0x0100, 0x226, 0x227],
0x4e12 : [0x0100, 0x226, 0x227],
0x4e21 : [0x0100, 0x226, 0x227],
@ -76,8 +76,11 @@ class ANDROID(USBMS):
0x413c : { 0xb007 : [0x0100, 0x0224, 0x0226]},
# LG
0x1004 : { 0x61cc : [0x100], 0x61ce : [0x100], 0x618e : [0x226,
0x9999] },
0x1004 : {
0x61cc : [0x100],
0x61ce : [0x100],
0x618e : [0x226, 0x9999, 0x100]
},
# Archos
0x0e79 : {

View File

@ -35,7 +35,7 @@ class ISBNDB(Source):
options = (
Option('isbndb_key', 'string', None, _('IsbnDB key:'),
_('To use isbndb.com you have to sign up for a free account'
_('To use isbndb.com you have to sign up for a free account '
'at isbndb.com and get an access key.')),
)

View File

@ -97,6 +97,7 @@ gprefs.defaults['book_display_fields'] = [
('last_modified', False), ('size', False),
]
gprefs.defaults['default_author_link'] = 'http://en.wikipedia.org/w/index.php?search={author}'
gprefs.defaults['preserve_date_on_ctl'] = True
# }}}

View File

@ -15,6 +15,7 @@ from calibre.gui2.actions import InterfaceAction
from calibre.gui2 import error_dialog, Dispatcher, warning_dialog, gprefs
from calibre.gui2.dialogs.progress import ProgressDialog
from calibre.utils.config import prefs, tweaks
from calibre.utils.date import now
class Worker(Thread): # {{{
@ -55,6 +56,8 @@ class Worker(Thread): # {{{
for i, x in enumerate(self.ids):
mi = self.db.get_metadata(x, index_is_id=True, get_cover=True,
cover_as_data=True)
if not gprefs['preserve_date_on_ctl']:
mi.timestamp = now()
self.progress(i, mi.title)
fmts = self.db.formats(x, index_is_id=True)
if not fmts: fmts = []

View File

@ -239,7 +239,7 @@ class DBAdder(QObject): # {{{
class Adder(QObject): # {{{
ADD_TIMEOUT = 600 # seconds
ADD_TIMEOUT = 900 # seconds (15 minutes)
def __init__(self, parent, db, callback, spare_server=None):
QObject.__init__(self, parent)

View File

@ -20,7 +20,7 @@ from calibre.constants import DEBUG
from calibre import prints
from calibre.utils.icu import sort_key, lower
from calibre.gui2 import NONE, error_dialog, info_dialog
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.utils.search_query_parser import SearchQueryParser, ParseException
from calibre.gui2.search_box import SearchBox2
ROOT = QModelIndex()
@ -452,7 +452,9 @@ class Delegate(QStyledItemDelegate): # {{{
def to_doc(self, index):
data = index.data(Qt.UserRole).toPyObject()
if data.is_shortcut:
if data is None:
html = _('<b>This shortcut no longer exists</b>')
elif data.is_shortcut:
shortcut = data.data
# Shortcut
keys = [unicode(k.toString(k.NativeText)) for k in shortcut['keys']]
@ -581,18 +583,26 @@ class ShortcutConfig(QWidget): # {{{
def scrollTo(self, index):
if index is not None:
self.view.scrollTo(index, self.view.PositionAtCenter)
self.view.scrollTo(index, self.view.PositionAtTop)
@property
def is_editing(self):
return self.view.state() == self.view.EditingState
def find(self, query):
idx = self._model.find(query)
if not query:
return
try:
idx = self._model.find(query)
except ParseException:
self.search.search_done(False)
return
self.search.search_done(True)
if not idx.isValid():
return info_dialog(self, _('No matches'),
_('Could not find any matching shortcuts'), show=True,
show_copy_button=False)
info_dialog(self, _('No matches'),
_('Could not find any shortcuts matching %s')%query,
show=True, show_copy_button=False)
return
self.highlight_index(idx)
def highlight_index(self, idx):
@ -600,6 +610,7 @@ class ShortcutConfig(QWidget): # {{{
self.view.selectionModel().select(idx,
self.view.selectionModel().ClearAndSelect)
self.view.setCurrentIndex(idx)
self.view.setFocus(Qt.OtherFocusReason)
def find_next(self, *args):
idx = self.view.currentIndex()

View File

@ -24,6 +24,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
r('read_file_metadata', prefs)
r('swap_author_names', prefs)
r('add_formats_to_existing', prefs)
r('preserve_date_on_ctl', gprefs)
choices = [
(_('Ignore duplicate incoming formats'), 'ignore'),
(_('Overwrite existing duplicate formats'), 'overwrite'),

View File

@ -58,7 +58,7 @@
</item>
</layout>
</item>
<item row="2" column="0">
<item row="3" column="0">
<widget class="QCheckBox" name="opt_add_formats_to_existing">
<property name="toolTip">
<string>Automerge: If books with similar titles and authors found, merge the incoming formats automatically into
@ -72,7 +72,7 @@ Title match ignores leading indefinite articles (&quot;the&quot;, &quot;a&quot;,
</property>
</widget>
</item>
<item row="2" column="1">
<item row="3" column="1">
<widget class="QComboBox" name="opt_automerge">
<property name="toolTip">
<string>Automerge: If books with similar titles and authors found, merge the incoming formats automatically into
@ -88,7 +88,7 @@ Author matching is exact.</string>
</property>
</widget>
</item>
<item row="3" column="0">
<item row="4" column="0">
<widget class="QLabel" name="label_230">
<property name="text">
<string>&amp;Tags to apply when adding a book:</string>
@ -98,14 +98,14 @@ Author matching is exact.</string>
</property>
</widget>
</item>
<item row="3" column="1">
<item row="4" column="1">
<widget class="QLineEdit" name="opt_new_book_tags">
<property name="toolTip">
<string>A comma-separated list of tags that will be applied to books added to the library</string>
</property>
</widget>
</item>
<item row="4" column="0" colspan="2">
<item row="5" column="0" colspan="2">
<widget class="QGroupBox" name="metadata_box">
<property name="title">
<string>&amp;Configure metadata from file name</string>
@ -127,6 +127,13 @@ Author matching is exact.</string>
</layout>
</widget>
</item>
<item row="2" column="0" colspan="2">
<widget class="QCheckBox" name="opt_preserve_date_on_ctl">
<property name="text">
<string>When &amp;copying books from one library to another, preserve the date</string>
</property>
</widget>
</item>
</layout>
</widget>
<resources/>

View File

@ -239,6 +239,7 @@ class ConfigWidget(ConfigWidgetBase, Ui_Form):
self.plugin_view.selectionModel().select(idx,
self.plugin_view.selectionModel().ClearAndSelect)
self.plugin_view.setCurrentIndex(idx)
self.plugin_view.setFocus(Qt.OtherFocusReason)
def find_next(self, *args):
idx = self.plugin_view.currentIndex()

View File

@ -108,6 +108,12 @@ class SearchBox2(QComboBox): # {{{
self.colorize = colorize
self.clear()
def hide_completer_popup(self):
try:
self.lineEdit().completer().popup().setVisible(False)
except:
pass
def normalize_state(self):
self.setToolTip(self.tool_tip_text)
self.line_edit.setStyleSheet(
@ -163,6 +169,8 @@ class SearchBox2(QComboBox): # {{{
# Comes from the combobox itself
def keyPressEvent(self, event):
k = event.key()
if k in (Qt.Key_Enter, Qt.Key_Return):
return self.do_search()
if k not in (Qt.Key_Up, Qt.Key_Down):
QComboBox.keyPressEvent(self, event)
else:
@ -183,6 +191,7 @@ class SearchBox2(QComboBox): # {{{
self.do_search()
def _do_search(self, store_in_history=True):
self.hide_completer_popup()
text = unicode(self.currentText()).strip()
if not text:
return self.clear()
@ -219,15 +228,11 @@ class SearchBox2(QComboBox): # {{{
self.clear()
else:
self.normalize_state()
self.lineEdit().setCompleter(None)
self.setEditText(txt)
self.line_edit.end(False)
if emit_changed:
self.changed.emit()
self._do_search(store_in_history=store_in_history)
c = QCompleter()
self.lineEdit().setCompleter(c)
c.setCompletionMode(c.PopupCompletion)
self.focus_to_library.emit()
finally:
if not store_in_history:

View File

@ -10,7 +10,7 @@ from xml.sax.saxutils import escape
from lxml import etree
from types import StringType, UnicodeType
from calibre import prints, prepare_string_for_xml, strftime
from calibre import (prints, prepare_string_for_xml, strftime, force_unicode)
from calibre.constants import preferred_encoding, DEBUG
from calibre.customize import CatalogPlugin
from calibre.customize.conversion import OptionRecommendation, DummyReporter
@ -1083,15 +1083,11 @@ class EPUB_MOBI(CatalogPlugin):
self.__totalSteps += incremental_jobs
# Load section list templates
templates = []
with open(P('catalog/section_list_templates.py'), 'r') as f:
for line in f:
t = re.match("(by_.+_template)",line)
if t:
templates.append(t.group(1))
execfile(P('catalog/section_list_templates.py'), locals())
for t in templates:
setattr(self,t,eval(t))
templates = {}
execfile(P('catalog/section_list_templates.py'), templates)
for name, template in templates.iteritems():
if name.startswith('by_') and name.endswith('_template'):
setattr(self, name, force_unicode(template, 'utf-8'))
# Accessors
if True:

View File

@ -284,11 +284,22 @@ The most likely cause of this is your antivirus program. Try temporarily disabli
I cannot send emails using |app|?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Because of the large amount of spam in email, sending email can be tricky as different servers use different strategies to block email spam.
Because of the large amount of spam in email, sending email can be tricky, as different mail servers use different strategies to block email.
The most common problem is if you are sending email directly (without a mail relay) in |app|. Many servers (for example, Amazon) block email
that does not come from a well known relay. The easiest way around this is to set up a free GMail account and then go to Preferences->Email in |app| and
click the "Use Gmail" button. |app| will then use Gmail to send the mail. Remember to update the email preferences on your Amazon Kindle page to
allow email sent from your Gmail email address.
that does not come from a well known relay. The most robust way to set up email sending in |app| is to do the following:
* Create a free GMail account at `Google <http://www.gmail.com>`_.
* Go to Preferences->Email in |app|, click the "Use Gmail" button and fill in the information asked for.
* |app| will then use GMail to send the mail.
* If you are sending to your Kindle, remember to update the email preferences on your Amazon Kindle page to allow email sent from your GMail email address.
Even after doing this, you may have problems. One common source of problems is that some poorly designed antivirus
programs block |app| from opening a connection to send email. Try adding an exclusion for |app| in your
antivirus program.
.. note:: Google can disable your account if you use it to send large amounts of email. So, when using GMail to send mail, |app| automatically restricts
itself to sending one book every five minutes. If you don't mind risking your account being blocked you can reduce this wait interval by
going to Preferences->Tweaks in |app|.
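
If you want to see what the "Use Gmail" route amounts to, the following generic Python sketch relays a message through GMail's SMTP server. It is not calibre's own code; the addresses, password and file name are placeholders, while the server settings (smtp.gmail.com, port 587, STARTTLS) are GMail's standard ones::

    import smtplib
    from email.message import EmailMessage

    msg = EmailMessage()
    msg['Subject'] = 'A book'
    msg['From'] = 'you@gmail.com'         # placeholder GMail address
    msg['To'] = 'you@free.kindle.com'     # placeholder Kindle address
    msg.set_content('Book attached.')
    with open('book.mobi', 'rb') as f:    # placeholder file
        msg.add_attachment(f.read(), maintype='application',
                           subtype='x-mobipocket-ebook', filename='book.mobi')

    with smtplib.SMTP('smtp.gmail.com', 587) as s:
        s.starttls()                      # GMail requires TLS on port 587
        s.login('you@gmail.com', 'your-password')
        s.send_message(msg)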
Why is my device not detected in Linux?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -307,7 +318,7 @@ must return ``CONFIG_SCSI_MULTI_LUN=y``. If you don't see either, you have to re
My device is getting mounted read-only in Linux, so |app| cannot connect to it?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
linux kernels mount devices read-only when their filesystems have errors. You can repair the filesystem with::
Linux kernels mount devices read-only when their filesystems have errors. You can repair the filesystem with::
sudo fsck.vfat -y /dev/sdc

67 more file diffs suppressed because they are too large.

Some files were not shown because too many files have changed in this diff.