Various fixes for calibre to run on python 2.6

This commit is contained in:
Kovid Goyal 2008-12-03 16:50:01 -08:00
parent 35d0c080f2
commit a084b0e198
11 changed files with 52 additions and 63 deletions

View File

@ -22,7 +22,7 @@ Run an embedded python interpreter.
) )
parser.add_option('-c', '--command', help='Run python code.', default=None) parser.add_option('-c', '--command', help='Run python code.', default=None)
parser.add_option('--migrate', action='store_true', default=False, parser.add_option('--migrate', action='store_true', default=False,
help='Migrate old database. Needs two arguments. Path to library1.db and path to new library folder.', default=False) help='Migrate old database. Needs two arguments. Path to library1.db and path to new library folder.')
return parser return parser
def update_zipfile(zipfile, mod, path): def update_zipfile(zipfile, mod, path):

View File

@ -1,37 +1,6 @@
__license__ = 'GPL v3' __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
from calibre.ebooks.lrf.web.profiles.nytimes import NYTimes
from calibre.ebooks.lrf.web.profiles.bbc import BBC
from calibre.ebooks.lrf.web.profiles.newsweek import Newsweek
from calibre.ebooks.lrf.web.profiles.economist import Economist
from calibre.ebooks.lrf.web.profiles.newyorkreview import NewYorkReviewOfBooks
from calibre.ebooks.lrf.web.profiles.spiegelde import SpiegelOnline
from calibre.ebooks.lrf.web.profiles.zeitde import ZeitNachrichten
from calibre.ebooks.lrf.web.profiles.faznet import FazNet
from calibre.ebooks.lrf.web.profiles.wsj import WallStreetJournal
from calibre.ebooks.lrf.web.profiles.barrons import Barrons
from calibre.ebooks.lrf.web.profiles.portfolio import Portfolio
from calibre.ebooks.lrf.web.profiles.cnn import CNN
from calibre.ebooks.lrf.web.profiles.chr_mon import ChristianScienceMonitor
from calibre.ebooks.lrf.web.profiles.jpost import JerusalemPost
from calibre.ebooks.lrf.web.profiles.reuters import Reuters
from calibre.ebooks.lrf.web.profiles.atlantic import Atlantic
from calibre.ebooks.lrf.web.profiles.ap import AssociatedPress
from calibre.ebooks.lrf.web.profiles.newyorker import NewYorker
from calibre.ebooks.lrf.web.profiles.jutarnji import Jutarnji
from calibre.ebooks.lrf.web.profiles.usatoday import USAToday
from calibre.ebooks.lrf.web.profiles.upi import UnitedPressInternational
from calibre.ebooks.lrf.web.profiles.wash_post import WashingtonPost
from calibre.ebooks.lrf.web.profiles.nasa import NASA
builtin_profiles = []
builtin_profiles = [Atlantic, AssociatedPress, Barrons, BBC, available_profiles = [i.__module__.rpartition('.')[2] for i in builtin_profiles]
ChristianScienceMonitor, CNN, Economist, FazNet,
JerusalemPost, Jutarnji, NASA, Newsweek, NewYorker,
NewYorkReviewOfBooks, NYTimes, UnitedPressInternational, USAToday,
Portfolio, Reuters, SpiegelOnline, WallStreetJournal,
WashingtonPost, ZeitNachrichten,
]
available_profiles = [i.__module__.rpartition('.')[2] for i in builtin_profiles]

View File

@ -61,7 +61,7 @@ class NASA(DefaultProfile):
(re.compile(r'<!-- Top Header starts -->.*?<!---->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'), (re.compile(r'<!-- Top Header starts -->.*?<!---->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'),
## This removes the "download image" of various sizes from the Image of the day. ## This removes the "download image" of various sizes from the Image of the day.
(re.compile(r'<div id="download_image_box_print">.*?<div id="caption_region_print">', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'), (re.compile(r'(?is)<div id="download_image_box_print">.*?<div id="caption_region_print">'), lambda match : '<New Stuff>'),
] ]

View File

@ -304,10 +304,11 @@ class ResultCache(SearchQueryParser):
class Tag(unicode): class Tag(unicode):
def __init__(self, name): def __new__(cls, *args):
unicode.__init__(self, name) obj = super(Tag, cls).__new__(cls, *args)
self.count = 0 obj.count = 0
self.state = 0 obj.state = 0
return obj
def as_string(self): def as_string(self):
return u'[%d] %s'%(self.count, self) return u'[%d] %s'%(self.count, self)

View File

@ -102,7 +102,7 @@ Device Integration
What devices does |app| support? What devices does |app| support?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
At the moment |app| has full support for the SONY PRS500 and PRS505. However, using the :guilabel:`Save to disk` function you can use it with any ebook reader that exports itself as a USB disk. At the moment |app| has full support for the SONY PRS 500/505/700 as well as the iPhone. In addition, using the :guilabel:`Save to disk` function you can use it with any ebook reader that exports itself as a USB disk.
I used |app| to transfer some books to my reader, and now the SONY software hangs every time I connect the reader? I used |app| to transfer some books to my reader, and now the SONY software hangs every time I connect the reader?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -117,6 +117,17 @@ Can I use the collections feature of the SONY reader?
|app| has full support for collections. When you add tags to a book's metadata, those tags are turned into collections when you upload the book to the SONY reader. Also, the series information is automatically |app| has full support for collections. When you add tags to a book's metadata, those tags are turned into collections when you upload the book to the SONY reader. Also, the series information is automatically
turned into a collection on the reader. Note that the PRS-500 does not support collections for books stored on the SD card. The PRS-505 does. turned into a collection on the reader. Note that the PRS-500 does not support collections for books stored on the SD card. The PRS-505 does.
How do I use |app| with my iPhone?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First install the Stanza reader on your iPhone from http://www.lexcycle.com. Then,
* Set the output format for calibre to EPUB (this can be done in the configuration dialog accessed by the little hammer icon next to the search bar)
* Convert the books you want to read on your iPhone to EPUB format by selecting them and clicking the Convert button.
* Turn on the Content Server in the configuration dialog and leave |app| running.
* In the Stanza reader on your iPhone, add a new catalog. The URL of the catalog is of the form
``http://10.34.56.89:8080/stanza``, where you should replace the IP address ``10.34.56.89``
with the IP address of your computer. Stanza will then use the |app| content server to access all the
EPUB books in your |app| database.
Library Management Library Management
------------------ ------------------

View File

@ -29,7 +29,7 @@ Date: 9 Mar 2007
from __future__ import generators from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs, md5 import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib
__version__ = '2.2' __version__ = '2.2'
__all__ = ['path'] __all__ = ['path']
@ -767,7 +767,7 @@ class path(_base):
""" """
f = self.open('rb') f = self.open('rb')
try: try:
m = md5.new() m = hashlib.md5()
while True: while True:
d = f.read(8192) d = f.read(8192)
if not d: if not d:

View File

@ -335,9 +335,9 @@ class BasicNewsRecipe(object, LoggingInterface):
It can be used to do arbitrarily powerful post-processing on the :term:`HTML`. It can be used to do arbitrarily powerful post-processing on the :term:`HTML`.
It should return `soup` after processing it. It should return `soup` after processing it.
:param soup: A `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/documentation.html>`_ :param soup: A `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/documentation.html>`_ instance containing the downloaded :term:`HTML`.
instance containing the downloaded :term:`HTML`.
:param first_fetch: True if this is the first page of an article. :param first_fetch: True if this is the first page of an article.
''' '''
return soup return soup

View File

@ -5,18 +5,18 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
Builtin recipes. Builtin recipes.
''' '''
recipe_modules = [ recipe_modules = [
'newsweek', 'atlantic', 'economist', 'portfolio', 'newsweek', 'atlantic', 'economist', 'portfolio',
'nytimes', 'usatoday', 'outlook_india', 'bbc', 'greader', 'wsj', 'nytimes', 'usatoday', 'outlook_india', 'bbc', 'greader', 'wsj',
'wired', 'globe_and_mail', 'smh', 'espn', 'business_week', 'wired', 'globe_and_mail', 'smh', 'espn', 'business_week',
'ars_technica', 'upi', 'new_yorker', 'irish_times', 'iht', 'lanacion', 'ars_technica', 'upi', 'new_yorker', 'irish_times', 'iht', 'lanacion',
'discover_magazine', 'scientific_american', 'new_york_review_of_books', 'discover_magazine', 'scientific_american', 'new_york_review_of_books',
'daily_telegraph', 'guardian', 'el_pais', 'new_scientist', 'b92', 'daily_telegraph', 'guardian', 'el_pais', 'new_scientist', 'b92',
'politika', 'moscow_times', 'latimes', 'japan_times', 'san_fran_chronicle', 'politika', 'moscow_times', 'latimes', 'japan_times', 'san_fran_chronicle',
'demorgen_be', 'de_standaard', 'ap', 'barrons', 'chr_mon', 'cnn', 'faznet', 'demorgen_be', 'de_standaard', 'ap', 'barrons', 'chr_mon', 'cnn', 'faznet',
'jpost', 'jutarnji', 'nasa', 'reuters', 'spiegelde', 'wash_post', 'zeitde', 'jpost', 'jutarnji', 'nasa', 'reuters', 'spiegelde', 'wash_post', 'zeitde',
'blic', 'novosti', 'danas', 'vreme', 'times_online', 'the_scotsman', 'blic', 'novosti', 'danas', 'vreme', 'times_online', 'the_scotsman',
'nytimes_sub', 'security_watch', 'cyberpresse', 'st_petersburg_times', 'nytimes_sub', 'security_watch', 'cyberpresse', 'st_petersburg_times',
'clarin', 'financial_times', 'heise' 'clarin', 'financial_times', 'heise', 'le_monde'
] ]
import re, imp, inspect, time, os import re, imp, inspect, time, os
@ -58,7 +58,7 @@ def compile_recipe(src):
Compile the code in src and return the first object that is a recipe or profile. Compile the code in src and return the first object that is a recipe or profile.
@param src: Python source code @param src: Python source code
@type src: string @type src: string
@return: Recipe/Profile class or None, if no such class was found in C{src} @return: Recipe/Profile class or None, if no such class was found in C{src}
''' '''
global _tdir, _crep global _tdir, _crep
if _tdir is None or not os.path.exists(_tdir): if _tdir is None or not os.path.exists(_tdir):
@ -77,13 +77,13 @@ def compile_recipe(src):
f.close() f.close()
module = imp.find_module(temp.namebase, [temp.dirname()]) module = imp.find_module(temp.namebase, [temp.dirname()])
module = imp.load_module(temp.namebase, *module) module = imp.load_module(temp.namebase, *module)
classes = inspect.getmembers(module, classes = inspect.getmembers(module,
lambda x : inspect.isclass(x) and \ lambda x : inspect.isclass(x) and \
issubclass(x, (DefaultProfile, BasicNewsRecipe)) and \ issubclass(x, (DefaultProfile, BasicNewsRecipe)) and \
x not in basic_recipes) x not in basic_recipes)
if not classes: if not classes:
return None return None
return classes[0][1] return classes[0][1]
@ -92,7 +92,7 @@ def get_builtin_recipe(title):
Return a builtin recipe/profile class whose title == C{title} or None if no such Return a builtin recipe/profile class whose title == C{title} or None if no such
recipe exists. Also returns a flag that is True iff the found recipe is really recipe exists. Also returns a flag that is True iff the found recipe is really
an old-style Profile. an old-style Profile.
@type title: string @type title: string
@rtype: class or None, boolean @rtype: class or None, boolean
''' '''
@ -117,9 +117,9 @@ class BasicUserRecipe%d(AutomaticNewsRecipe):
oldest_article = %d oldest_article = %d
max_articles_per_feed = %d max_articles_per_feed = %d
summary_length = %d summary_length = %d
feeds = %s feeds = %s
'''%(int(time.time()), repr(profile.title), profile.oldest_article, '''%(int(time.time()), repr(profile.title), profile.oldest_article,
profile.max_articles_per_feed, profile.summary_length, repr(profile.feeds)) profile.max_articles_per_feed, profile.summary_length, repr(profile.feeds))

View File

@ -53,13 +53,12 @@ class NASA(BasicNewsRecipe):
(r'<a.*?onclick.*?>.*?(<img .*?>)', lambda match: match.group(1),), (r'<a.*?onclick.*?>.*?(<img .*?>)', lambda match: match.group(1),),
## This removes header and footer information from each print version. ## This removes header and footer information from each print version.
(re.compile(r'<!-- Top Header starts -->.*?<!-- Body starts -->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'), (r'<!-- Top Header starts -->.*?<!-- Body starts -->', lambda match : '<New Stuff>'),
(re.compile(r'<hr align="center" width="200"><p align="center">.*?<!-- Press Release standard text ends -->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'), (r'<hr align="center" width="200"><p align="center">.*?<!-- Press Release standard text ends -->', lambda match : '<New Stuff>'),
(re.compile(r'<!-- Top Header starts -->.*?<!---->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'), (r'<!-- Top Header starts -->.*?<!---->', lambda match : '<New Stuff>'),
## This removes the "download image" of various sizes from the Image of the day. ## This removes the "download image" of various sizes from the Image of the day.
(re.compile(r'<div id="download_image_box_print">.*?<div id="caption_region_print">', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'), (r'<div id="download_image_box_print">.*?<div id="caption_region_print">', lambda match : '<New Stuff>'),
] ]
] ]

View File

@ -49,7 +49,7 @@ import utils
import warnings import warnings
from generic import * from generic import *
from utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList from utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList
from sets import ImmutableSet
## ##
# This class supports writing PDF files out, given pages produced by another # This class supports writing PDF files out, given pages produced by another
@ -986,8 +986,8 @@ class PageObject(DictionaryObject):
# Combine /ProcSet sets. # Combine /ProcSet sets.
newResources[NameObject("/ProcSet")] = ArrayObject( newResources[NameObject("/ProcSet")] = ArrayObject(
ImmutableSet(originalResources.get("/ProcSet", ArrayObject()).getObject()).union( frozenset(originalResources.get("/ProcSet", ArrayObject()).getObject()).union(
ImmutableSet(page2Resources.get("/ProcSet", ArrayObject()).getObject()) frozenset(page2Resources.get("/ProcSet", ArrayObject()).getObject())
) )
) )

9
todo-2.6 Normal file
View File

@ -0,0 +1,9 @@
* Refactor web.fetch.simple to use per connection timeouts via the timeout kwarg for mechanize.open
* Refactor IPC code to use communication logic from multiprocessing
* Use multiprocessing for cpu_count instead of QThread
* Windows build:
* Compile all dependencies with MSVC 2008 since this is what python now uses