diff --git a/Changelog.yaml b/Changelog.yaml
index 174e7fc823..887ac4e05d 100644
--- a/Changelog.yaml
+++ b/Changelog.yaml
@@ -19,6 +19,59 @@
# new recipes:
# - title:
+- version: 0.8.13
+  date: 2011-08-05
+
+  new features:
+    - title: "Add a new action 'Pick Random Book' that can be added to the toolbar via Preferences->Toolbars."
+      tickets: [818315]
+
+    - title: "Driver for Droid X2"
+      tickets: [821053]
+
+    - title: "PDF metadata: Support reading/writing of tags from the Keywords field in PDF files."
+
+    - title: "MOBI Input: Speed up reading of HUFF/CDIC compressed files"
+
+    - title: "MOBI Output: Add a command line option --extract-to that uses the inspect MOBI tool to extract the created MOBI file to the specified directory"
+
+    - title: "Template language: Add a few new functions to manipulate lists (list_difference, list_intersection, list_sort)"
+
+    - title: "Make the Manage Tags/Publishers/etc. dialog show a column with the count for each item, making it easy to sort by number of items"
+
+    - title: "MOBI Output: Generate navpoints for items at every level in the TOC, not just the deepest level"
+
+  bug fixes:
+    - title: "MOBI Output: Remove the option to choose a masthead font, as the font selection control causes crashes on some Windows systems"
+
+    - title: "MOBI Output: Fix bug that caused paragraphs that had only a non-breaking space as text before the first child element to be removed."
+      tickets: [819058]
+
+    - title: "Display undefined dates properly in the Book details panel."
+      tickets: [819222]
+
+    - title: "Fix regression that broke deleting of books from first generation Kobos with un-upgraded firmware"
+      tickets: [818704]
+
+    - title: "Get books: Fix the Gutenberg store and improve the chitanka.info and e-knigni.net stores"
+
+    - title: "News download: Support https proxies"
+
+    - title: "Check library: Fix the check not being aware of original_* files"
+
+    - title: "Fix crash caused by a book having a very large number of authors (more than 100)"
+
+  improved recipes:
+    - Nikkei News
+
+  new recipes:
+    - title: Carta Capital
+      author: Pablo Aldama
+
+    - title: El Tiempo, El Colombiano and Portafolio Colombia
+      author: Cavalencia
+
+
 - version: 0.8.12
   date: 2011-07-29
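
Note on the "MOBI Output: --extract-to" entry above: the option is passed on the conversion command line. A minimal usage sketch, where the input/output file names and the target directory are hypothetical and only the --extract-to option name comes from the changelog:

    ebook-convert mybook.epub mybook.mobi --extract-to /tmp/mobi-extracted
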
diff --git a/imgsrc/mimetypes/djvu.svg b/imgsrc/mimetypes/djvu.svg
new file mode 100644
index 0000000000..401d9b4091
--- /dev/null
+++ b/imgsrc/mimetypes/djvu.svg
@@ -0,0 +1,438 @@
diff --git a/imgsrc/random.svg b/imgsrc/random.svg
new file mode 100644
index 0000000000..8dec21307e
--- /dev/null
+++ b/imgsrc/random.svg
@@ -0,0 +1,758 @@
diff --git a/recipes/carta_capital.recipe b/recipes/carta_capital.recipe
new file mode 100644
index 0000000000..8bd21046b1
--- /dev/null
+++ b/recipes/carta_capital.recipe
@@ -0,0 +1,23 @@
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class AdvancedUserRecipe1312361378(BasicNewsRecipe):
+    title = u'Carta capital'
+    __author__ = 'Pablo Aldama'
+    language = 'pt_BR'
+    oldest_article = 9
+    max_articles_per_feed = 100
+    feeds = [
+        (u'Politica', u'http://www.cartacapital.com.br/category/politica/feed'),
+        (u'Economia', u'http://www.cartacapital.com.br/category/economia/feed'),
+        (u'Cultura', u'http://www.cartacapital.com.br/category/cultura/feed'),
+        (u'Internacional', u'http://www.cartacapital.com.br/category/internacional/feed'),
+        (u'Saude', u'http://www.cartacapital.com.br/category/saude/feed'),
+        (u'Sociedade', u'http://www.cartacapital.com.br/category/sociedade/feed'),
+        (u'Tecnologia', u'http://www.cartacapital.com.br/category/tecnologia/feed'),
+        (u'Carta na escola', u'http://www.cartacapital.com.br/category/carta-na-escola/feed'),
+        (u'Carta fundamental', u'http://www.cartacapital.com.br/category/carta-fundamental/feed'),
+        (u'Carta verde', u'http://www.cartacapital.com.br/category/carta-verde/feed'),
+    ]
+
+    def print_version(self, url):
+        return url + '/print'
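
For context on the recipe above: BasicNewsRecipe calls print_version() on every article URL collected from the feeds and downloads the returned URL instead of the original page. A minimal sketch of what this recipe's hook produces (the article URL is hypothetical; only the '/print' suffix comes from the code above):

    # hypothetical article URL, for illustration only
    article_url = 'http://www.cartacapital.com.br/politica/um-artigo-qualquer'
    print_url = article_url + '/print'
    # calibre fetches print_url when assembling the news e-book
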
diff --git a/recipes/economist.recipe b/recipes/economist.recipe
index ded781b314..e46a074f3a 100644
--- a/recipes/economist.recipe
+++ b/recipes/economist.recipe
@@ -70,6 +70,11 @@ class Economist(BasicNewsRecipe):
     def economist_parse_index(self):
         soup = self.index_to_soup(self.INDEX)
+        div = soup.find('div', attrs={'class':'issue-image'})
+        if div is not None:
+            img = div.find('img', src=True)
+            if img is not None:
+                self.cover_url = img['src']
         feeds = OrderedDict()
         for section in soup.findAll(attrs={'class':lambda x: x and 'section' in
             x}):
@@ -109,7 +114,9 @@ class Economist(BasicNewsRecipe):
                         'description':'', 'date':''})
             if articles:
-                feeds[section_title] = articles
+                if section_title not in feeds:
+                    feeds[section_title] = []
+                feeds[section_title] += articles
         ans = [(key, val) for key, val in feeds.iteritems()]
         if not ans:
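
The second hunk above fixes the case where the printed edition repeats a section title: previously the later batch of articles replaced the earlier one in the OrderedDict, now it is appended. A standalone sketch of the same accumulation pattern (section and article names are invented for illustration):

    from collections import OrderedDict

    feeds = OrderedDict()

    def add_articles(section_title, articles):
        # Append to an existing section instead of overwriting it, mirroring
        # the patched economist_parse_index() above.
        if section_title not in feeds:
            feeds[section_title] = []
        feeds[section_title] += articles

    add_articles('Briefing', [{'title': 'First article'}])
    add_articles('Briefing', [{'title': 'Second article'}])
    # feeds['Briefing'] now holds both articles instead of only the last batch
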
diff --git a/recipes/economist_free.recipe b/recipes/economist_free.recipe
index 71c323d6a3..73fb7b8891 100644
--- a/recipes/economist_free.recipe
+++ b/recipes/economist_free.recipe
@@ -1,3 +1,157 @@
+#!/usr/bin/env python
+
+__license__ = 'GPL v3'
+__copyright__ = '2008, Kovid Goyal '
+'''
+economist.com
+'''
+from calibre.web.feeds.news import BasicNewsRecipe
+from calibre.ebooks.BeautifulSoup import Tag, NavigableString
+from collections import OrderedDict
+
+import time, re
+
+class Economist(BasicNewsRecipe):
+
+    title = 'The Economist'
+    language = 'en'
+
+    __author__ = "Kovid Goyal"
+    INDEX = 'http://www.economist.com/printedition'
+    description = ('Global news and current affairs from a European'
+                   ' perspective. Best downloaded on Friday mornings (GMT)')
+    extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
+    oldest_article = 7.0
+    cover_url = 'http://media.economist.com/sites/default/files/imagecache/print-cover-thumbnail/print-covers/currentcoverus_large.jpg'
+    #cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
+    remove_tags = [
+        dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
+        dict(attrs={'class':['dblClkTrk', 'ec-article-info',
+                             'share_inline_header', 'related-items']}),
+        {'class': lambda x: x and 'share-links-header' in x},
+    ]
+    keep_only_tags = [dict(id='ec-article-body')]
+    needs_subscription = False
+    no_stylesheets = True
+    preprocess_regexps = [(re.compile('