mirror of https://github.com/kovidgoyal/calibre.git
synced 2025-07-07 18:24:30 -04:00

commit 7351b16d00
KG updates 0.7.36 final
@@ -4,6 +4,97 @@
 # for important features/bug fixes.
 # Also, each release can have new and improved recipes.

+- version: 0.7.36
+  date: 2011-01-01
+
+  new features:
+    - title: "Tag browser: Add subcategories and search"
+      description: "When a category has many items, it will be automatically split up. Also add a search to quickly find an item in the Tag browser. The subcategories can be controlled via Preferences->Tweaks. Also add a button to collapse all categories."
+      type: major
+
+    - title: "Device drivers for the Google Nexus S, Motorola Backflip, Samsung Galaxy Tablet, PocketBook 603/903, EEEReader DR900 and the NextBook"
+
+    - title: "Tag editor dialog now remembers its last used size"
+      tickets: [8063]
+
+    - title: "OS X dmg: Add a symlink pointing to the Applications folder for easy installation"
+      tickets: [8052]
+
+    - title: "Catalog generation: CSV/XML catalogs now support custom fields. Also write a UTF-8 BOM to the CSV output file."
+      tickets: [8014]
+
+    - title: "EPUB/MOBI catalogs: Various new features"
+      description: "Added a custom field/value for excluding books, OR'd with the existing tag list. Added a thumbnail width hint, from 1.0 to 2.0 inches. Deprecated support for the special note tag '*'; added support for a custom column containing the note to be inserted in the Description header. Added a 'Merge with comments' feature, which non-destructively combines Comments with a custom field when generating Descriptions. Moved the Description header into a user-editable template file. All fields except thumb and comments are accessible to the template."
+      tickets: [7820, 5297, 6765]
+
+    - title: "SONY driver: Allow the creation of an 'All by Something' category via the tweaks."
+
+    - title: "Add a tweak to control the delay when sending mails using gmail or hotmail."
+      tickets: [8064]
+
+    - title: "Add an output encoding option for the TXT/PDB/PMLZ output plugins to the GUI"
+
+    - title: "Add an environment variable to control the temporary directory calibre uses (see the sketch after this changelog)"
+
+    - title: "Use the new HTML editor widget for comments custom columns as well"
+
+    - title: "Content server: Fix regression that broke saved searches"
+      tickets: [8047]
+
+    - title: "E-book viewer: Fix regression that broke the previous page button"
+
+    - title: "Add a tweak to allow double clicking on the book list to open the edit metadata dialog"
+      tickets: [8032]
+
+    - title: "Add a tweak to use a template for formatting SONY collection names"
+      tickets: [8033]
+
+    - title: "Bulk edit metadata, search and replace: Show all values for multiple fields in the text region, separated by :::"
+      tickets: [8030]
+
+    - title: "Update the user agent used by calibre when connecting to websites"
+
+  bug fixes:
+    - title: "FB2 Output: Fix regression that broke images in generated FB2 files"
+      tickets: [8142]
+
+    - title: "When unzipping zip files that contain filenames with an unknown character encoding, sanitize the filenames correctly"
+      tickets: [8050]
+
+    - title: "TCR Output: Fix TCR compression adding junk to the end of the text. Remove the compression level option."
+
+    - title: "PDF Output: Fix regression that broke the margin options."
+
+    - title: "FB2 Input: Handle non UTF-8 encodings on OS X"
+      tickets: [8115]
+
+    - title: "SNB Input: Better error handling if some metadata is missing in the SNB file. Add Wi-Fi connection support for the Bambook"
+
+    - title: "Allow hyperlinks to be clicked in comments metadata in the book details panel"
+      tickets: [8054]
+
+  improved recipes:
+    - Brand Eins
+    - Volkskrant
+    - Smithsonian
+    - Business World
+    - El Universal
+    - Salon
+    - The Week
+    - El Pais
+    - Wired Magazine
+    - Heraldo de Aragon
+
+  new recipes:
+    - title: "Karlsruhe News"
+      author: "tfeld"
+
+    - title: "El Periodico and Red Aragon"
+      author: "desUBIKado"
+
+    - title: "Business Insider"
+      author: "Darko Miletic"
+
 - version: 0.7.35
   date: 2010-12-23
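Of the features above, the temporary-directory override is the easiest to show in use. A minimal sketch, assuming the variable is named CALIBRE_TEMP_DIR; the changelog entry does not name it, so treat that as an assumption:

# Minimal sketch: point calibre's temporary files at a custom directory
# before launching it. The variable name CALIBRE_TEMP_DIR is an
# assumption; the changelog entry above does not spell it out.
import os
import subprocess

os.environ['CALIBRE_TEMP_DIR'] = '/var/tmp/calibre-work'
subprocess.call(['calibre'])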
@@ -1,19 +1,16 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 mode: python -*-

 # Find the newest version of this recipe here:
 # https://github.com/consti/BrandEins-Recipe/raw/master/brandeins.recipe

 __license__ = 'GPL v3'
 __copyright__ = '2010, Constantin Hofstetter <consti at consti.de>, Steffen Siebert <calibre at steffensiebert.de>'
-__version__ = '0.96'
+__version__ = '0.97'

 ''' http://brandeins.de - Wirtschaftsmagazin '''
 import re
 import string
 from calibre.ebooks.BeautifulSoup import Tag
 from calibre.web.feeds.recipes import BasicNewsRecipe


 class BrandEins(BasicNewsRecipe):

     title = u'brand eins'
@@ -28,6 +25,8 @@ class BrandEins(BasicNewsRecipe):
     language = 'de'
     publication_type = 'magazine'
+    needs_subscription = 'optional'
     # Prevent the conversion date from being appended to the title
     timefmt = ''

     # 2 is the last full magazine (default)
     # 1 is the newest (but not full)
@@ -66,6 +65,13 @@ class BrandEins(BasicNewsRecipe):
             new_p = "<p><i>"+ content +"</i></p>"
             p.replaceWith(new_p)

+        # Change <h3> to <h1>
+        header = soup.find("h3")
+        if header:
+            tag = Tag(soup, "h1")
+            tag.insert(0, header.contents[0])
+            header.replaceWith(tag)
+
         return soup

     def get_cover(self, soup):
@@ -77,6 +83,7 @@ class BrandEins(BasicNewsRecipe):

     def parse_index(self):
         feeds = []
+        issue_map = {}

         archive = "http://www.brandeins.de/archiv.html"

@@ -88,21 +95,31 @@ class BrandEins(BasicNewsRecipe):
             pass

         soup = self.index_to_soup(archive)
-        latest_jahrgang = soup.findAll('div', attrs={'class': re.compile(r'\bjahrgang-latest\b') })[0].findAll('ul')[0]
-        pre_latest_issue = latest_jahrgang.findAll('a')[len(latest_jahrgang.findAll('a'))-issue]
-        url = pre_latest_issue.get('href', False)
-        # Get month and year of the magazine issue - build it out of the title of the cover
-        self.timefmt = " " + re.search(r"(?P<date>\d\d\/\d\d\d\d)", pre_latest_issue.find('img').get('title', False)).group('date')
+        issue_list = soup.findAll('div', attrs={'class': 'tx-brandeinsmagazine-pi1'})[0].findAll('a')
+        issue_list = [i for i in issue_list if i.get('onmouseover', False)]
+        for i in issue_list:
+            issue_number_string = i.get('onmouseover', False)
+            if issue_number_string:
+                match = re.match("^switch_magazine\(([0-9]+), ([0-9]+)\)$", issue_number_string)
+                issue_number = "%04i%02i" % (int(match.group(1)), int(match.group(2)))
+                issue_map[issue_number] = i
+        keys = issue_map.keys()
+        keys.sort()
+        keys.reverse()
+        selected_issue = issue_map[keys[issue-1]]
+        url = selected_issue.get('href', False)
+        # Get the title for the magazine - build it out of the title of the cover - take the issue and year
+        self.title = "brand eins "+ re.search(r"(?P<date>\d\d\/\d\d\d\d)", selected_issue.find('img').get('title', False)).group('date')
         url = 'http://brandeins.de/'+url

-        # url = "http://www.brandeins.de/archiv/magazin/tierisch.html"
-        titles_and_articles = self.brand_eins_parse_latest_issue(url)
+        titles_and_articles = self.brand_eins_parse_issue(url)
         if titles_and_articles:
             for title, articles in titles_and_articles:
                 feeds.append((title, articles))
         return feeds

-    def brand_eins_parse_latest_issue(self, url):
+    def brand_eins_parse_issue(self, url):
         soup = self.index_to_soup(url)
         self.cover_url = self.get_cover(soup)
         article_lists = [soup.find('div', attrs={'class':'subColumnLeft articleList'}), soup.find('div', attrs={'class':'subColumnRight articleList'})]
@@ -145,4 +162,3 @@ class BrandEins(BasicNewsRecipe):
             current_articles.append({'title': title, 'url': url, 'description': description, 'date':''})
         titles_and_articles.append([chapter_title, current_articles])
         return titles_and_articles
-
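The issue selection above hinges on the "%04i%02i" key format: zero-padded year+month strings sort lexicographically in chronological order, so sorting and reversing the keys puts the newest issue first. A standalone sketch with made-up issue numbers:

# Standalone sketch of the issue_map keying used in parse_index above:
# zero-padded "YYYYMM" strings sort lexicographically in date order.
issue_map = {}
for year, month in [(2010, 9), (2010, 12), (2009, 11)]:
    issue_map["%04i%02i" % (year, month)] = (year, month)

keys = sorted(issue_map, reverse=True)  # newest first
print(keys)                # ['201012', '201009', '200911']
print(issue_map[keys[0]])  # (2010, 12), i.e. issue 12/2010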
@@ -17,8 +17,8 @@ class SmithsonianMagazine(BasicNewsRecipe):
     remove_tags = [
         dict(name='iframe'),
         dict(name='div', attrs={'class':'article_sidebar_border'}),
-        dict(name='div', attrs={'id':['article_sidebar_border', 'most-popular_large']}),
-        #dict(name='ul', attrs={'class':'article-tools'}),
+        dict(name='div', attrs={'id':['article_sidebar_border', 'most-popular_large', 'most-popular-body_large']}),
+        ##dict(name='ul', attrs={'class':'article-tools'}),
         dict(name='ul', attrs={'class':'cat-breadcrumb col three last'}),
     ]

@@ -37,16 +37,16 @@ class SmithsonianMagazine(BasicNewsRecipe):
     ]

     def preprocess_html(self, soup):
-        story = soup.find(name='div', attrs={'id':'article-left'})
-        #td = heading.findParent(name='td')
-        #td.extract()
+        story = soup.find(name='div', attrs={'id':'article-body'})
+        ##td = heading.findParent(name='td')
+        ##td.extract()
         soup = BeautifulSoup('<html><head><title>t</title></head><body></body></html>')
         body = soup.find(name='body')
         body.insert(0, story)
         return soup

-    def postprocess_html(self, soup, first):
-        for p in soup.findAll(id='articlePaginationWrapper'): p.extract()
-        if not first:
-            for div in soup.findAll(id='article-head'): div.extract()
-        return soup
+    #def postprocess_html(self, soup, first):
+        #for p in soup.findAll(id='articlePaginationWrapper'): p.extract()
+        #if not first:
+            #for div in soup.findAll(id='article-head'): div.extract()
+        #return soup
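preprocess_html above keeps only the article div by moving it into a fresh, empty document. A minimal standalone sketch of that pattern, using hypothetical markup and bs4 in place of calibre's bundled BeautifulSoup:

# Standalone sketch of the "keep only the article div" pattern from
# preprocess_html above. Markup is hypothetical; bs4 stands in for
# calibre's bundled BeautifulSoup.
from bs4 import BeautifulSoup

page = BeautifulSoup(
    '<html><body><div id="nav">menu</div>'
    '<div id="article-body"><p>story text</p></div></body></html>',
    'html.parser')
story = page.find('div', attrs={'id': 'article-body'})

shell = BeautifulSoup('<html><head><title>t</title></head>'
                      '<body></body></html>', 'html.parser')
shell.body.insert(0, story)
print(shell)  # navigation is gone; only the article div remains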
@@ -1,4 +1,3 @@
 #!/usr/bin/env python
 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
-from __future__ import with_statement

@@ -36,6 +35,9 @@ class AdvancedUserRecipe1249039563(BasicNewsRecipe):
     Change Log:
     Date: 10/10/10 - Modified code to include obfuscated article fetching, to get the print version
     Author: Tony Stegall

+    Date: 01/01/11 - Modified for better results around December/January.
+    Author: Martin Tarenskeen
     '''
     #######################################################################################################
     temp_files = []
@@ -47,6 +49,11 @@ class AdvancedUserRecipe1249039563(BasicNewsRecipe):
         br.open(url)
         year = date.today().year
+
         try:
             response = br.follow_link(url_regex='.*?(%d)(\\/)(article)(\\/)(print)(\\/)'%year, nr = 0)
             html = response.read()
         except:
+            year = year-1
+            try:
+                response = br.follow_link(url_regex='.*?(%d)(\\/)(article)(\\/)(print)(\\/)'%year, nr = 0)
+                html = response.read()
@@ -54,6 +61,7 @@ class AdvancedUserRecipe1249039563(BasicNewsRecipe):
                 response = br.open(url)
                 html = response.read()
+

         self.temp_files.append(PersistentTemporaryFile('_fa.html'))
         self.temp_files[-1].write(html)
         self.temp_files[-1].close()
@@ -76,10 +84,3 @@ class AdvancedUserRecipe1249039563(BasicNewsRecipe):
         (u'Cultuur', u'http://www.volkskrant.nl/rss/kunst.rss'),
         (u'Gezondheid & Wetenschap', u'http://www.volkskrant.nl/rss/wetenschap.rss'),
         (u'Internet & Media', u'http://www.volkskrant.nl/rss/media.rss') ]
-
-
-    '''
-    example for formatting
-    '''
-    # original url: http://www.volkskrant.nl/vk/nl/2668/Buitenland/article/detail/1031493/2010/10/10/Noord-Korea-ziet-nieuwe-leider.dhtml
-    # print url   : http://www.volkskrant.nl/vk/nl/2668/2010/article/print/detail/1031493/Noord-Korea-ziet-nieuwe-leider.dhtml
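The retry added above handles the December/January rollover: print URLs carry a year, and right after New Year the newest articles may still use last year's. A standalone sketch of the same fallback, assuming a mechanize-style browser and a simplified URL pattern:

# Standalone sketch of the year-rollover fallback added above: try this
# year's print link, then last year's, then fall back to the page itself.
# Assumes a mechanize-style browser object; the URL pattern is simplified.
from datetime import date

def fetch_print_version(br, url):
    br.open(url)
    year = date.today().year
    for y in (year, year - 1):  # covers the December/January boundary
        try:
            response = br.follow_link(url_regex=r'.*?%d/article/print/' % y, nr=0)
            return response.read()
        except Exception:
            continue
    return br.open(url).read()  # no print version found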
@@ -254,7 +254,7 @@ def browser(honor_time=True, max_time=2, mobile_browser=False):
     opener.set_handle_refresh(True, max_time=max_time, honor_time=honor_time)
     opener.set_handle_robots(False)
     opener.addheaders = [('User-agent', ' Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016' if mobile_browser else \
-        'Mozilla/5.0 (X11; U; i686 Linux; en_US; rv:1.8.0.4) Gecko/20060508 Firefox/1.5.0.4')]
+        'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101210 Gentoo Firefox/3.6.13')]
     http_proxy = get_proxies().get('http', None)
     if http_proxy:
         opener.set_proxies({'http':http_proxy})
@@ -2,7 +2,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__ = 'calibre'
-__version__ = '0.7.35'
+__version__ = '0.7.36'
 __author__ = "Kovid Goyal <kovid@kovidgoyal.net>"

 import re
@@ -35,7 +35,7 @@ class ANDROID(USBMS):

     # Google
     0x18d1 : { 0x4e11 : [0x0100, 0x226, 0x227], 0x4e12: [0x0100, 0x226,
-        0x227]},
+        0x227], 0x4e21: [0x0100, 0x226, 0x227]},

     # Samsung
     0x04e8 : { 0x681d : [0x0222, 0x0223, 0x0224, 0x0400],
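The table above maps USB vendor id to {product id: [supported BCD revisions]}; this commit adds product id 0x4e21 under Google's vendor id 0x18d1, presumably the Nexus S from the changelog. A sketch of how such a table answers "is this device ours?":

# Sketch of matching a connected device against a vendor/product/BCD
# table like the one above. 0x4e21 is the product id this commit adds
# under Google's vendor id (presumably the Nexus S).
VENDOR_PRODUCT = {
    0x18d1: {0x4e11: [0x0100, 0x226, 0x227],
             0x4e12: [0x0100, 0x226, 0x227],
             0x4e21: [0x0100, 0x226, 0x227]},  # Google
}

def is_supported(vendor_id, product_id, bcd):
    products = VENDOR_PRODUCT.get(vendor_id, {})
    return bcd in products.get(product_id, [])

print(is_supported(0x18d1, 0x4e21, 0x0227))  # True
print(is_supported(0x18d1, 0x9999, 0x0227))  # False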
@@ -200,8 +200,10 @@ class FB2MLizer(object):
                 im = Image()
                 im.load(item.data)
                 im.set_compression_quality(70)
-                data = im.export('jpg')
-                raw_data = b64encode(data)
+                imdata = im.export('jpg')
+                raw_data = b64encode(imdata)
             else:
                 raw_data = b64encode(item.data)
             # Don't put the encoded image on a single line.
             data = ''
             col = 1
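The rename from data to imdata is the substance of the FB2 image fix (ticket 8142): the name data is reused immediately below as the accumulator for the line-wrapped base64 output, so sharing it with the exported JPEG bytes was fragile. A standalone sketch of that wrapping step; the width is arbitrary and the loop shape is inferred from the data/col counters visible above:

# Standalone sketch of the line-wrapping step below the rename: base64
# output is accumulated into `data`, which is why reusing that name for
# the exported image bytes was fragile. Width is arbitrary here, and the
# loop shape is inferred, not copied from FB2MLizer.
from base64 import b64encode

def wrap_b64(raw, width=72):
    raw_data = b64encode(raw).decode('ascii')
    data = ''
    col = 1
    for char in raw_data:
        data += char
        col += 1
        if col > width:  # start a new line of encoded image data
            data += '\n'
            col = 1
    return data

print(wrap_b64(b'\x00' * 100))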
@@ -35,7 +35,7 @@ class FB2Output(OutputFormatPlugin):
             rasterizer = SVGRasterizer()
             rasterizer(oeb_book, opts)
         except Unavailable:
-            self.log.warn('SVG rasterizer unavailable, SVG will not be converted')
+            log.warn('SVG rasterizer unavailable, SVG will not be converted')

         linearize_jacket(oeb_book)
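The self.log to log change reads like a scoping fix: an output plugin's convert method receives a log argument, and that local name is what the handler should use. A sketch of the surrounding shape; the signature and import paths are assumptions from memory of calibre's plugin API, not from this diff:

# Sketch of the surrounding shape: convert() receives `log` as a
# parameter, so the except handler uses it rather than self.log.
# Signature and import paths are assumptions, not from this diff.
from calibre.customize.conversion import OutputFormatPlugin
from calibre.ebooks.oeb.transforms.rasterize import SVGRasterizer, Unavailable

class SketchFB2Output(OutputFormatPlugin):

    def convert(self, oeb_book, output_path, input_plugin, opts, log):
        try:
            rasterizer = SVGRasterizer()
            rasterizer(oeb_book, opts)
        except Unavailable:
            log.warn('SVG rasterizer unavailable, SVG will not be converted')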
@@ -27,7 +27,39 @@ def get_metadata(stream):
             with TemporaryDirectory() as tdir:
                 with CurrentDir(tdir):
                     path = zf.extract(f)
-                    return get_metadata(open(path, 'rb'), stream_type)
+                    mi = get_metadata(open(path, 'rb'), stream_type)
+                    if stream_type == 'opf' and mi.application_id == None:
+                        try:
+                            # OPF files in zip archives without an application_id were assumed not to have a cover;
+                            # reparse the OPF and, if a cover exists, read its data from the zip archive for the metadata
+                            nmi = zip_opf_metadata(path, zf)
+                            return nmi
+                        except:
+                            pass
+                    return mi
     raise ValueError('No ebook found in ZIP archive')


+def zip_opf_metadata(opfpath, zf):
+    from calibre.ebooks.metadata.opf2 import OPF
+    if hasattr(opfpath, 'read'):
+        f = opfpath
+        opfpath = getattr(f, 'name', os.getcwd())
+    else:
+        f = open(opfpath, 'rb')
+    opf = OPF(f, os.path.dirname(opfpath))
+    mi = opf.to_book_metadata()
+    # This is broken, in that it only works when
+    # both the OPF file and the cover file are in the root of the
+    # zip file and the cover is an actual raster image, but I don't care
+    # enough to make it more robust
+    if getattr(mi, 'cover', None):
+        covername = os.path.basename(mi.cover)
+        mi.cover = None
+        names = zf.namelist()
+        if covername in names:
+            fmt = covername.rpartition('.')[-1]
+            data = zf.read(covername)
+            mi.cover_data = (fmt, data)
+    return mi
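The cover-recovery step in zip_opf_metadata only needs the archive's name list and a basename match. A standalone sketch using the stdlib zipfile module; file names are hypothetical:

# Standalone sketch of the cover-recovery step in zip_opf_metadata above:
# look up the cover's basename in the archive and return (format, bytes).
# File names are hypothetical.
import os
import zipfile

def cover_from_zip(zip_path, cover_href):
    covername = os.path.basename(cover_href)
    with zipfile.ZipFile(zip_path) as zf:
        if covername in zf.namelist():
            fmt = covername.rpartition('.')[-1]
            return (fmt, zf.read(covername))
    return None

# cover_from_zip('book.zip', 'images/cover.jpg') -> ('jpg', b'...') or None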
@@ -402,7 +402,7 @@ class FieldStrings:
        Logic:
            self.__link_switch = re.compile(r'\\l\s{1,}(.*?)\s')
        """
-        self.__link_switch = re.compile(r'\\l\s{1,}(.*?)\s')
+        self.__link_switch = re.compile(r'\\l\s{1,}"{0,1}(.*?)"{0,1}\s')
        the_string = name
        match_group = re.search(self.__link_switch, line)
        if match_group:
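The only change is the "{0,1} on each side of the capture group, which makes surrounding double quotes optional and keeps them out of the match. A quick comparison on a hypothetical RTF field string:

# Quick comparison of the old and new patterns above on a hypothetical
# RTF field string: the new one keeps the quotes out of the capture.
import re

old = re.compile(r'\\l\s{1,}(.*?)\s')
new = re.compile(r'\\l\s{1,}"{0,1}(.*?)"{0,1}\s')

line = r'HYPERLINK \l "bookmark1" '
print(old.search(line).group(1))  # "bookmark1" (quotes included)
print(new.search(line).group(1))  # bookmark1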
@@ -263,13 +263,13 @@ class EditorWidget(QWebView): # {{{
         if ev.key() in (Qt.Key_Tab, Qt.Key_Escape, Qt.Key_Backtab):
             ev.ignore()
         else:
-            return QWebView.keyPressed(self, ev)
+            return QWebView.keyPressEvent(self, ev)

     def keyReleaseEvent(self, ev):
         if ev.key() in (Qt.Key_Tab, Qt.Key_Escape, Qt.Key_Backtab):
             ev.ignore()
         else:
-            return QWebView.keyReleased(self, ev)
+            return QWebView.keyReleaseEvent(self, ev)


     # }}}
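QWebView has no keyPressed or keyReleased methods, so the old calls raised AttributeError as soon as any non-filtered key was typed; the real Qt handlers are keyPressEvent and keyReleaseEvent. The same selective-forwarding pattern, sketched on QTextEdit so it stands alone (PyQt4-era API, matching the diff):

# The same selective key-forwarding pattern as above, sketched on
# QTextEdit so it stands alone (PyQt4-era API, matching the diff).
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QTextEdit

class TabIgnoringEdit(QTextEdit):

    def keyPressEvent(self, ev):
        if ev.key() in (Qt.Key_Tab, Qt.Key_Escape, Qt.Key_Backtab):
            ev.ignore()  # let the parent widget handle focus keys
        else:
            QTextEdit.keyPressEvent(self, ev)  # the real base handler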
@@ -4,7 +4,7 @@ from PyQt4.QtCore import SIGNAL, Qt
 from PyQt4.QtGui import QDialog

 from calibre.gui2.dialogs.tag_editor_ui import Ui_TagEditor
-from calibre.gui2 import question_dialog, error_dialog
+from calibre.gui2 import question_dialog, error_dialog, gprefs
 from calibre.constants import islinux
 from calibre.utils.icu import sort_key

@@ -49,6 +49,10 @@ class TagEditor(QDialog, Ui_TagEditor):
         self.connect(self.available_tags, SIGNAL('itemActivated(QListWidgetItem*)'), self.apply_tags)
         self.connect(self.applied_tags, SIGNAL('itemActivated(QListWidgetItem*)'), self.unapply_tags)

+        geom = gprefs.get('tag_editor_geometry', None)
+        if geom is not None:
+            self.restoreGeometry(geom)
+

     def delete_tags(self, item=None):
         confirms, deletes = [], []
@@ -121,3 +125,15 @@ class TagEditor(QDialog, Ui_TagEditor):
             self.applied_tags.addItem(tag)

         self.add_tag_input.setText('')
+
+    def accept(self):
+        self.save_state()
+        return QDialog.accept(self)
+
+    def reject(self):
+        self.save_state()
+        return QDialog.reject(self)
+
+    def save_state(self):
+        gprefs['tag_editor_geometry'] = bytearray(self.saveGeometry())
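The geometry handling is split across three hunks above; put together, the pattern is: restore in __init__, save on both accept and reject so the size persists whether the dialog is confirmed or cancelled. A consolidated sketch with a plain dict standing in for calibre's gprefs store:

# Consolidated sketch of the remember-my-size pattern added above; a
# plain dict stands in for calibre's persistent gprefs store.
from PyQt4.QtGui import QDialog

gprefs = {}  # stand-in for calibre.gui2.gprefs

class RememberingDialog(QDialog):

    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        geom = gprefs.get('dialog_geometry', None)
        if geom is not None:
            self.restoreGeometry(geom)

    def save_state(self):
        gprefs['dialog_geometry'] = bytearray(self.saveGeometry())

    def accept(self):
        self.save_state()  # OK button
        return QDialog.accept(self)

    def reject(self):
        self.save_state()  # Cancel/Esc too, so the size always persists
        return QDialog.reject(self)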
62 more file diffs suppressed because they are too large.