Various fixes for calibre to run on python 2.6
commit a084b0e198 (parent 35d0c080f2)
@@ -22,7 +22,7 @@ Run an embedded python interpreter.
     )
     parser.add_option('-c', '--command', help='Run python code.', default=None)
     parser.add_option('--migrate', action='store_true', default=False,
-                      help='Migrate old database. Needs two arguments. Path to library1.db and path to new library folder.', default=False)
+                      help='Migrate old database. Needs two arguments. Path to library1.db and path to new library folder.')
     return parser

 def update_zipfile(zipfile, mod, path):
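The removed continuation repeated the default=False keyword already supplied on the line above, which Python rejects when it parses the call. A standalone sketch, not part of the commit, showing the error the fix avoids:

    # A repeated keyword argument in a single call is a parse-time error.
    try:
        compile("f(default=False, default=False)", "<demo>", "eval")
    except SyntaxError, err:
        print err.msg    # keyword argument repeated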
@@ -1,37 +1,6 @@
 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

-from calibre.ebooks.lrf.web.profiles.nytimes import NYTimes
-from calibre.ebooks.lrf.web.profiles.bbc import BBC
-from calibre.ebooks.lrf.web.profiles.newsweek import Newsweek
-from calibre.ebooks.lrf.web.profiles.economist import Economist
-from calibre.ebooks.lrf.web.profiles.newyorkreview import NewYorkReviewOfBooks
-from calibre.ebooks.lrf.web.profiles.spiegelde import SpiegelOnline
-from calibre.ebooks.lrf.web.profiles.zeitde import ZeitNachrichten
-from calibre.ebooks.lrf.web.profiles.faznet import FazNet
-from calibre.ebooks.lrf.web.profiles.wsj import WallStreetJournal
-from calibre.ebooks.lrf.web.profiles.barrons import Barrons
-from calibre.ebooks.lrf.web.profiles.portfolio import Portfolio
-from calibre.ebooks.lrf.web.profiles.cnn import CNN
-from calibre.ebooks.lrf.web.profiles.chr_mon import ChristianScienceMonitor
-from calibre.ebooks.lrf.web.profiles.jpost import JerusalemPost
-from calibre.ebooks.lrf.web.profiles.reuters import Reuters
-from calibre.ebooks.lrf.web.profiles.atlantic import Atlantic
-from calibre.ebooks.lrf.web.profiles.ap import AssociatedPress
-from calibre.ebooks.lrf.web.profiles.newyorker import NewYorker
-from calibre.ebooks.lrf.web.profiles.jutarnji import Jutarnji
-from calibre.ebooks.lrf.web.profiles.usatoday import USAToday
-from calibre.ebooks.lrf.web.profiles.upi import UnitedPressInternational
-from calibre.ebooks.lrf.web.profiles.wash_post import WashingtonPost
-from calibre.ebooks.lrf.web.profiles.nasa import NASA
-
-
-builtin_profiles = [Atlantic, AssociatedPress, Barrons, BBC,
-                    ChristianScienceMonitor, CNN, Economist, FazNet,
-                    JerusalemPost, Jutarnji, NASA, Newsweek, NewYorker,
-                    NewYorkReviewOfBooks, NYTimes, UnitedPressInternational, USAToday,
-                    Portfolio, Reuters, SpiegelOnline, WallStreetJournal,
-                    WashingtonPost, ZeitNachrichten,
-                   ]
-
-available_profiles = [i.__module__.rpartition('.')[2] for i in builtin_profiles]
+builtin_profiles = []
+available_profiles = [i.__module__.rpartition('.')[2] for i in builtin_profiles]
@@ -61,7 +61,7 @@ class NASA(DefaultProfile):
     (re.compile(r'<!-- Top Header starts -->.*?<!---->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'),

     ## This removes the "download image" of various sizes from the Image of the day.
-    (re.compile(r'<div id="download_image_box_print">.*?<div id="caption_region_print">', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'),
+    (re.compile(r'(?is)<div id="download_image_box_print">.*?<div id="caption_region_print">'), lambda match : '<New Stuff>'),

     ]
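The new pattern folds the re.IGNORECASE | re.DOTALL flag arguments into the (?is) inline-flag prefix, which sets the same two flags from inside the pattern itself. A standalone sketch of the equivalence:

    import re

    pattern = r'<div id="a">.*?</div>'
    flagged = re.compile(pattern, re.IGNORECASE | re.DOTALL)
    inline = re.compile('(?is)' + pattern)   # (?i) = IGNORECASE, (?s) = DOTALL

    html = '<DIV id="a">one\ntwo</DIV>'
    assert flagged.search(html).group() == inline.search(html).group()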
@@ -304,10 +304,11 @@ class ResultCache(SearchQueryParser):

 class Tag(unicode):

-    def __init__(self, name):
-        unicode.__init__(self, name)
-        self.count = 0
-        self.state = 0
+    def __new__(cls, *args):
+        obj = super(Tag, cls).__new__(cls, *args)
+        obj.count = 0
+        obj.state = 0
+        return obj

     def as_string(self):
         return u'[%d] %s'%(self.count, self)
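Because unicode is immutable, an instance's value is fixed in __new__; on Python 2.6, forwarding extra constructor arguments to unicode.__init__ triggers a DeprecationWarning, so the per-instance attributes move into __new__ instead. A standalone sketch of the pattern in use:

    class Tag(unicode):
        # Immutable base types are built in __new__, so extra attributes
        # are attached there rather than in __init__.
        def __new__(cls, *args):
            obj = super(Tag, cls).__new__(cls, *args)
            obj.count = 0
            obj.state = 0
            return obj

    t = Tag(u'fiction')
    t.count += 1
    print u'[%d] %s' % (t.count, t)    # [1] fiction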
@@ -102,7 +102,7 @@ Device Integration

 What devices does |app| support?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-At the moment |app| has full support for the SONY PRS500 and PRS505. However, using the :guilabel:`Save to disk` function you can use it with any ebook reader that exports itself as a USB disk.
+At the moment |app| has full support for the SONY PRS 500/505/700 as well as the iPhone. In addition, using the :guilabel:`Save to disk` function you can use it with any ebook reader that exports itself as a USB disk.

 I used |app| to transfer some books to my reader, and now the SONY software hangs every time I connect the reader?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -117,6 +117,17 @@ Can I use the collections feature of the SONY reader?
 |app| has full support for collections. When you add tags to a book's metadata, those tags are turned into collections when you upload the book to the SONY reader. Also, the series information is automatically
 turned into a collection on the reader. Note that the PRS-500 does not support collections for books stored on the SD card. The PRS-505 does.

+How do I use |app| with my iPhone?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+First install the Stanza reader on your iPhone from http://www.lexcycle.com . Then,
+
+  * Set the output format for calibre to EPUB (this can be done in the configuration dialog, accessed by the little hammer icon next to the search bar)
+  * Convert the books you want to read on your iPhone to EPUB format by selecting them and clicking the Convert button.
+  * Turn on the Content Server in the configuration dialog and leave |app| running.
+  * In the Stanza reader on your iPhone, add a new catalog. The URL of the catalog is of the form
+    ``http://10.34.56.89:8080/stanza``, where you should replace the IP address ``10.34.56.89``
+    with the IP address of your computer. Stanza will then use the |app| content server to access all the
+    EPUB books in your |app| database.

 Library Management
 ------------------
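An aside, not part of the commit: if you are unsure which IP address to substitute into the catalog URL, a rough Python sketch like the one below reports the address of the interface your machine uses to reach the network; on machines with several interfaces, checking your network settings directly is more reliable.

    import socket

    # Connecting a UDP socket sends no packets, but makes the OS pick an
    # outgoing interface, whose local address we can then read back.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('8.8.8.8', 80))
    print s.getsockname()[0]    # e.g. 10.34.56.89
    s.close()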
@@ -29,7 +29,7 @@ Date: 9 Mar 2007

 from __future__ import generators

-import sys, warnings, os, fnmatch, glob, shutil, codecs, md5
+import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib

 __version__ = '2.2'
 __all__ = ['path']
@@ -767,7 +767,7 @@ class path(_base):
     """
     f = self.open('rb')
     try:
-        m = md5.new()
+        m = hashlib.md5()
         while True:
             d = f.read(8192)
             if not d:
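The md5 module is deprecated on Python 2.6 in favour of hashlib, and hashlib.md5() is a drop-in replacement for md5.new(). A standalone sketch of the same chunked hashing loop (the file name is hypothetical):

    import hashlib

    def file_md5(path):
        # Hash in 8 KB chunks so large files need not fit in memory.
        m = hashlib.md5()
        f = open(path, 'rb')
        try:
            while True:
                d = f.read(8192)
                if not d:
                    break
                m.update(d)
        finally:
            f.close()
        return m.hexdigest()

    print file_md5('example.bin')    # hypothetical file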
@@ -335,9 +335,9 @@ class BasicNewsRecipe(object, LoggingInterface):
     It can be used to do arbitrarily powerful post-processing on the :term:`HTML`.
     It should return `soup` after processing it.

-    :param soup: A `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/documentation.html>`_
-        instance containing the downloaded :term:`HTML`.
+    :param soup: A `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/documentation.html>`_ instance containing the downloaded :term:`HTML`.
+    :param first_fetch: True if this is the first page of an article.

     '''
     return soup
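This docstring documents the postprocess_html(soup, first_fetch) hook; a minimal sketch of overriding it in a recipe (the recipe, its import path, and the script stripping are illustrative assumptions, not part of the commit):

    from calibre.web.feeds.news import BasicNewsRecipe

    class ExampleRecipe(BasicNewsRecipe):
        title = 'Example'

        def postprocess_html(self, soup, first_fetch):
            # Drop every <script> tag from the downloaded page.
            for tag in soup.findAll('script'):
                tag.extract()
            return soup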
@@ -5,18 +5,18 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
 Builtin recipes.
 '''
 recipe_modules = [
-    'newsweek', 'atlantic', 'economist', 'portfolio',
+    'newsweek', 'atlantic', 'economist', 'portfolio',
     'nytimes', 'usatoday', 'outlook_india', 'bbc', 'greader', 'wsj',
     'wired', 'globe_and_mail', 'smh', 'espn', 'business_week',
     'ars_technica', 'upi', 'new_yorker', 'irish_times', 'iht', 'lanacion',
     'discover_magazine', 'scientific_american', 'new_york_review_of_books',
-    'daily_telegraph', 'guardian', 'el_pais', 'new_scientist', 'b92',
+    'daily_telegraph', 'guardian', 'el_pais', 'new_scientist', 'b92',
     'politika', 'moscow_times', 'latimes', 'japan_times', 'san_fran_chronicle',
     'demorgen_be', 'de_standaard', 'ap', 'barrons', 'chr_mon', 'cnn', 'faznet',
     'jpost', 'jutarnji', 'nasa', 'reuters', 'spiegelde', 'wash_post', 'zeitde',
     'blic', 'novosti', 'danas', 'vreme', 'times_online', 'the_scotsman',
     'nytimes_sub', 'security_watch', 'cyberpresse', 'st_petersburg_times',
-    'clarin', 'financial_times', 'heise'
+    'clarin', 'financial_times', 'heise', 'le_monde'
 ]

 import re, imp, inspect, time, os
@@ -58,7 +58,7 @@ def compile_recipe(src):
     Compile the code in src and return the first object that is a recipe or profile.
     @param src: Python source code
     @type src: string
-    @return: Recipe/Profile class or None, if no such class was found in C{src}
+    @return: Recipe/Profile class or None, if no such class was found in C{src}
     '''
     global _tdir, _crep
     if _tdir is None or not os.path.exists(_tdir):
@@ -77,13 +77,13 @@ def compile_recipe(src):
     f.close()
     module = imp.find_module(temp.namebase, [temp.dirname()])
     module = imp.load_module(temp.namebase, *module)
-    classes = inspect.getmembers(module,
+    classes = inspect.getmembers(module,
         lambda x : inspect.isclass(x) and \
                    issubclass(x, (DefaultProfile, BasicNewsRecipe)) and \
                    x not in basic_recipes)
     if not classes:
         return None

     return classes[0][1]
@@ -92,7 +92,7 @@ def get_builtin_recipe(title):
     Return a builtin recipe/profile class whose title == C{title} or None if no such
     recipe exists. Also returns a flag that is True iff the found recipe is really
     an old-style Profile.
-
+
     @type title: string
     @rtype: class or None, boolean
     '''
@@ -117,9 +117,9 @@ class BasicUserRecipe%d(AutomaticNewsRecipe):
     oldest_article = %d
     max_articles_per_feed = %d
     summary_length = %d

     feeds = %s

-'''%(int(time.time()), repr(profile.title), profile.oldest_article,
+'''%(int(time.time()), repr(profile.title), profile.oldest_article,
     profile.max_articles_per_feed, profile.summary_length, repr(profile.feeds))
@@ -53,13 +53,12 @@ class NASA(BasicNewsRecipe):
     (r'<a.*?onclick.*?>.*?(<img .*?>)', lambda match: match.group(1),),

     ## This removes header and footer information from each print version.
-    (re.compile(r'<!-- Top Header starts -->.*?<!-- Body starts -->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'),
-    (re.compile(r'<hr align="center" width="200"><p align="center">.*?<!-- Press Release standard text ends -->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'),
-    (re.compile(r'<!-- Top Header starts -->.*?<!---->', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'),
+    (r'<!-- Top Header starts -->.*?<!-- Body starts -->', lambda match : '<New Stuff>'),
+    (r'<hr align="center" width="200"><p align="center">.*?<!-- Press Release standard text ends -->', lambda match : '<New Stuff>'),
+    (r'<!-- Top Header starts -->.*?<!---->', lambda match : '<New Stuff>'),

     ## This removes the "download image" of various sizes from the Image of the day.
-    (re.compile(r'<div id="download_image_box_print">.*?<div id="caption_region_print">', re.IGNORECASE | re.DOTALL), lambda match : '<New Stuff>'),
+    (r'<div id="download_image_box_print">.*?<div id="caption_region_print">', lambda match : '<New Stuff>'),

-    ]
+    ]
@@ -49,7 +49,7 @@ import utils
 import warnings
 from generic import *
 from utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList
-from sets import ImmutableSet
+

 ##
 # This class supports writing PDF files out, given pages produced by another
@@ -986,8 +986,8 @@ class PageObject(DictionaryObject):

     # Combine /ProcSet sets.
     newResources[NameObject("/ProcSet")] = ArrayObject(
-        ImmutableSet(originalResources.get("/ProcSet", ArrayObject()).getObject()).union(
-            ImmutableSet(page2Resources.get("/ProcSet", ArrayObject()).getObject())
+        frozenset(originalResources.get("/ProcSet", ArrayObject()).getObject()).union(
+            frozenset(page2Resources.get("/ProcSet", ArrayObject()).getObject())
         )
     )
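The sets module that provided ImmutableSet is deprecated on Python 2.6; the builtin frozenset, available since Python 2.4, offers the same immutable-set semantics. A standalone sketch of the swap (the /ProcSet values are hypothetical):

    # Old, warns on 2.6:  from sets import ImmutableSet
    # New, builtin:       frozenset
    a = ['/PDF', '/Text']
    b = ['/Text', '/ImageB']
    combined = frozenset(a).union(frozenset(b))
    print sorted(combined)    # ['/ImageB', '/PDF', '/Text']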
todo-2.6 (new file, 9 lines)
@@ -0,0 +1,9 @@
+
+* Refactor web.fetch.simple to use per connection timeouts via the timeout kwarg for mechanize.open
+
+* Refactor IPC code to use communication logic from multiprocessing
+
+* Use multiprocessing for cpu_count instead of QThread
+
+* Windows build:
+    * Compile all dependencies with MSVC 2008 since this is what python now uses
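Two of these items map onto Python 2.6 features directly; a standalone sketch (the URL is a placeholder, and the timeout keyword assumes a mechanize version that supports it, as the first item notes):

    import multiprocessing
    import mechanize

    # Processor count from the 2.6 stdlib, without a QThread dependency.
    print multiprocessing.cpu_count()

    # Per-connection timeout on a single fetch.
    br = mechanize.Browser()
    response = br.open('http://example.com', timeout=25.0)
    print response.code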