commit d68e488e8b

    Pull from trunk
@@ -144,7 +144,7 @@ CONFIG += x86 ppc
             return _build_ext.build_extension(self, ext)

         c_sources = [f for f in ext.sources if os.path.splitext(f)[1].lower() in ('.c', '.cpp', '.cxx')]
-        compile_args = '/c /nologo /Ox /MD /W3 /GX /DNDEBUG'.split()
+        compile_args = '/c /nologo /Ox /MD /W3 /EHsc /DNDEBUG'.split()
         compile_args += ext.extra_compile_args
         self.swig_opts = ''
         inc_dirs = self.include_dirs + [x.replace('/', '\\') for x in ext.include_dirs]
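A small illustration of the flag swap above: /EHsc is the current MSVC switch for standard C++ exception handling and replaces the deprecated /GX; the rest of the argument list is unchanged.

    # Sketch only; old_args/new_args mirror the two lines in the hunk above.
    old_args = '/c /nologo /Ox /MD /W3 /GX /DNDEBUG'.split()
    new_args = '/c /nologo /Ox /MD /W3 /EHsc /DNDEBUG'.split()
    # The only flag that differs is the exception-handling switch.
    assert [a for a in new_args if a not in old_args] == ['/EHsc']
    assert [a for a in old_args if a not in new_args] == ['/GX']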
@@ -153,11 +153,12 @@ CONFIG += x86 ppc
         for f in c_sources:
             o = os.path.join(bdir, os.path.basename(f)+'.obj')
             objects.append(o)
-            compiler = cc + ['/Tc'+f, '/Fo'+o]
+            inf = '/Tp' if f.endswith('.cpp') else '/Tc'
+            compiler = cc + [inf+f, '/Fo'+o]
             self.spawn(compiler)
         out = os.path.join(bdir, base+'.pyd')
         linker = [msvc.linker] + '/DLL /nologo /INCREMENTAL:NO'.split()
-        linker += ['/LIBPATH:'+x for x in self.library_dirs]
+        linker += ['/LIBPATH:'+x for x in self.library_dirs+ext.library_dirs]
         linker += [x+'.lib' for x in ext.libraries]
         linker += ['/EXPORT:init'+base] + objects + ['/OUT:'+out]
         self.spawn(linker)
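A sketch of the per-file compile command assembled above, assuming 'cc' is the cl.exe invocation with the common flags and 'bdir' is the build directory: C++ sources now get /Tp while plain C sources keep /Tc, and each file is compiled to an .obj named after it.

    import os

    def compile_cmd(cc, f, bdir):
        # cl.exe needs to be told the language of each translation unit:
        # /Tp<file> compiles it as C++, /Tc<file> compiles it as C.
        o = os.path.join(bdir, os.path.basename(f) + '.obj')
        inf = '/Tp' if f.endswith('.cpp') else '/Tc'
        return cc + [inf + f, '/Fo' + o], o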
setup.py | 3
@@ -66,10 +66,9 @@ if __name__ == '__main__':
     podofo_lib = '/usr/lib' if islinux else r'C:\podofo' if iswindows else \
             '/Users/kovid/podofo/lib'
     if os.path.exists(os.path.join(podofo_inc, 'podofo.h')):
-        eca = ['/EHsc'] if iswindows else []
         optional.append(Extension('calibre.plugins.podofo',
                         sources=['src/calibre/utils/podofo/podofo.cpp'],
-                        libraries=['podofo'], extra_compile_args=eca,
+                        libraries=['podofo'],
                         library_dirs=[os.environ.get('PODOFO_LIB_DIR', podofo_lib)],
                         include_dirs=\
                         [os.environ.get('PODOFO_INC_DIR', podofo_inc)]))
@@ -2,7 +2,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__ = 'calibre'
-__version__ = '0.5.11'
+__version__ = '0.5.12'
 __author__ = "Kovid Goyal <kovid@kovidgoyal.net>"
 '''
 Various run time constants.
@@ -508,6 +508,9 @@ class OPF(object):
                 else:
                     self.path_to_html_toc, self.html_toc_fragment = \
                         toc.partition('#')[0], toc.partition('#')[-1]
+                    if not os.access(self.path_to_html_toc, os.R_OK) or \
+                            not os.path.isfile(self.path_to_html_toc):
+                        self.path_to_html_toc = None
                     self.toc.read_html_toc(toc)
             except:
                 pass
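The three added lines guard against a dangling reference: the HTML TOC path is only kept if it names an existing, readable file. A standalone sketch of the same check (the 'toc' value here is a hypothetical 'path#fragment' string):

    import os

    def split_html_toc(toc):
        # 'path#fragment' -> (path, fragment); drop the path if it is unusable
        path, fragment = toc.partition('#')[0], toc.partition('#')[-1]
        if not os.access(path, os.R_OK) or not os.path.isfile(path):
            path = None
        return path, fragment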
@@ -403,7 +403,7 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
             pix = QPixmap()
             pix.loadFromData(self.cover_fetcher.cover_data)
             if pix.isNull():
-                error_dialog(self.window, _('Bad cover'),
+                error_dialog(self, _('Bad cover'),
                         _('The cover is not a valid picture')).exec_()
             else:
                 self.cover.setPixmap(pix)
src/calibre/translations/calibre.pot | 6052 (new file; diff suppressed because it is too large)

(Numerous additional file diffs in this commit are suppressed because they are too large.)
@@ -2,7 +2,7 @@

 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
-import re, string, time
+import re, time
 from calibre import strftime
 from calibre.web.feeds.news import BasicNewsRecipe

@@ -13,62 +13,51 @@ class Newsweek(BasicNewsRecipe):
     description = 'Weekly news and current affairs in the US'
     no_stylesheets = True
     language = _('English')

-    extra_css = '''
-        #content { font-size:normal; font-family: serif }
-        .story { font-size:normal }
-        .HorizontalHeader {font-size:xx-large}
-        .deck {font-size:x-large}
-    '''
-    keep_only_tags = [dict(name='div', id='content')]
-
     remove_tags = [
-        dict(name=['script', 'noscript']),
-        dict(name='div', attrs={'class':['ad', 'SocialLinks', 'SocialLinksDiv',
-            'channel', 'bot', 'nav', 'top',
-            'EmailArticleBlock',
-            'comments-and-social-links-wrapper',
-            'inline-social-links-wrapper',
-            'inline-social-links',
-            ]}),
-        dict(name='div', attrs={'class':re.compile('box')}),
-        dict(id=['ToolBox', 'EmailMain', 'EmailArticle', 'comment-box',
-            'nw-comments'])
+        {'class':['navbar', 'ad', 'sponsorLinksArticle', 'mm-content',
+            'inline-social-links-wrapper', 'email-article',
+            'comments-and-social-links-wrapper', 'EmailArticleBlock']},
+        {'id' : ['footer', 'ticker-data', 'topTenVertical',
+            'digg-top-five', 'mesothorax', 'nw-comments',
+            'ToolBox', 'EmailMain']},
+        {'class': re.compile('related-cloud')},
     ]
+    keep_only_tags = [{'class':['article HorizontalHeader', 'articlecontent']}]

     recursions = 1
     match_regexps = [r'http://www.newsweek.com/id/\S+/page/\d+']

-    def get_sections(self, soup):
-        sections = []
-
-        def process_section(img):
-            articles = []
-            match = re.search(r'label_([^_.]+)', img['src'])
-            if match is None:
-                return
-            title = match.group(1)
-            if title in ['coverstory', 'more', 'tipsheet']:
-                return
-            title = string.capwords(title)
-
-            for a in img.parent.findAll('a', href=True):
-                art, href = a.string, a['href']
-                if not re.search('\d+$', href) or not art or 'Preview Article' in art:
-                    continue
-                articles.append({
-                    'title':art, 'url':href, 'description':'',
-                    'content':'', 'date':''
-                    })
-            sections.append((title, articles))
-
-            img.parent.extract()
-
-        for img in soup.findAll(src=re.compile('/label_')):
-            process_section(img)
-
-        return sections
+    def find_title(self, section):
+        d = {'scope':'Scope', 'thetake':'The Take', 'features':'Features',
+                None:'Departments'}
+        ans = None
+        a = section.find('a', attrs={'name':True})
+        if a is not None:
+            ans = a['name']
+        return d[ans]
+
+    def find_articles(self, section):
+        ans = []
+        for x in section.findAll('h5'):
+            title = ' '.join(x.findAll(text=True)).strip()
+            a = x.find('a')
+            if not a: continue
+            href = a['href']
+            ans.append({'title':title, 'url':href, 'description':'', 'date': strftime('%a, %d %b')})
+        if not ans:
+            for x in section.findAll('div', attrs={'class':'hdlItem'}):
+                a = x.find('a', href=True)
+                if not a : continue
+                title = ' '.join(a.findAll(text=True)).strip()
+                href = a['href']
+                if 'http://xtra.newsweek.com' in href: continue
+                ans.append({'title':title, 'url':href, 'description':'', 'date': strftime('%a, %d %b')})
+
+        #for x in ans:
+        #    x['url'] += '/output/print'
+        return ans

     def parse_index(self):
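The dictionaries built by find_articles above use the keys ('title', 'url', 'description', 'date') that the recipe framework consumes when building feeds. A minimal sketch of one such entry; time.strftime is used here only so the snippet is self-contained, whereas the recipe itself uses calibre's strftime wrapper:

    import time

    def make_entry(title, href):
        return {'title': title, 'url': href, 'description': '',
                'date': time.strftime('%a, %d %b')}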
@@ -83,49 +72,45 @@ class Newsweek(BasicNewsRecipe):
         self.timefmt = strftime(' [%d %b, %Y]', time.strptime(match.group(1), '%y%m%d'))
         self.cover_url = small.replace('coversmall', 'coverlarge')

-        sections = self.get_sections(soup)
-        sections.insert(0, ('Main articles', []))
-
-        for tag in soup.findAll('h5'):
-            a = tag.find('a', href=True)
-            if a is not None:
-                title = self.tag_to_string(a)
-                if not title:
-                    a = 'Untitled article'
-                art = {
-                    'title' : title,
-                    'url'   : a['href'],
-                    'description':'', 'content':'',
-                    'date': strftime('%a, %d %b')
-                    }
-                if art['title'] and art['url']:
-                    sections[0][1].append(art)
-        return sections
+        sections = soup.findAll('div', attrs={'class':'featurewell'})
+        titles = map(self.find_title, sections)
+        articles = map(self.find_articles, sections)
+        ans = list(zip(titles, articles))
+        def fcmp(x, y):
+            tx, ty = x[0], y[0]
+            if tx == "Features": return cmp(1, 2)
+            if ty == "Features": return cmp(2, 1)
+            return cmp(tx, ty)
+        return sorted(ans, cmp=fcmp)


     def postprocess_html(self, soup, first_fetch):
-        divs = list(soup.findAll('div', 'pagination'))
-        if not divs:
-            return
-        divs[0].extract()
-        if len(divs) > 1:
-            soup.find('body')['style'] = 'page-break-after:avoid'
-            divs[1].extract()
-
-            h1 = soup.find('h1')
+        if not first_fetch:
+            h1 = soup.find(id='headline')
             if h1:
                 h1.extract()
-            ai = soup.find('div', 'articleInfo')
-            ai.extract()
-        else:
-            soup.find('body')['style'] = 'page-break-before:always; page-break-after:avoid;'
+            div = soup.find(attrs={'class':'articleInfo'})
+            if div:
+                div.extract()
+        divs = list(soup.findAll('div', 'pagination'))
+        if not divs:
+            return soup
+        for div in divs[1:]: div.extract()
+        all_a = divs[0].findAll('a', href=True)
+        divs[0]['style']="display:none"
+        if len(all_a) > 1:
+            all_a[-1].extract()
+        test = re.compile(self.match_regexps[0])
+        for a in soup.findAll('a', href=test):
+            if a not in all_a:
+                del a['href']
         return soup

     def get_current_issue(self):
-        #from urllib2 import urlopen # For some reason mechanize fails
-        #home = urlopen('http://www.newsweek.com').read()
-        soup = self.index_to_soup('http://www.newsweek.com')#BeautifulSoup(home)
-        img = soup.find('img', alt='Current Magazine')
-        if img and img.parent.has_key('href'):
-            return self.index_to_soup(img.parent['href'])
+        soup = self.index_to_soup('http://www.newsweek.com')
+        div = soup.find('div', attrs={'class':re.compile('more-from-mag')})
+        if div is None: return None
+        a = div.find('a')
+        if a is not None:
+            href = a['href'].split('#')[0]
+            return self.index_to_soup(href)
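The fcmp comparator in the new parse_index sorts the (title, articles) pairs alphabetically while forcing the 'Features' section to the front (cmp(1, 2) is -1, so a 'Features' title always compares as smaller). A sketch of the same ordering expressed as a sort key:

    def section_key(item):
        title = item[0]
        # 'Features' first, everything else alphabetical
        return (0 if title == 'Features' else 1, title)

    # equivalent to sorted(ans, cmp=fcmp) in the recipe: sorted(ans, key=section_key)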
src/calibre/web/feeds/recipes/recipe_slashdot.py | 39 (new file)
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import with_statement
+
+__license__ = 'GPL v3'
+__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
+__docformat__ = 'restructuredtext en'
+
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class Slashdot(BasicNewsRecipe):
+    title = u'Slashdot.org'
+    oldest_article = 7
+    max_articles_per_feed = 100
+    language = _('English')
+    __author__ = 'floweros'
+    no_stylesheets = True
+    keep_only_tags = [dict(name='div',attrs={'id':'article'})]
+    remove_tags = [
+        dict(name='div',attrs={'id':'userlogin-title'}),
+        dict(name='div',attrs={'id':'userlogin-content'}),
+        dict(name='div',attrs={'id':'commentwrap'}),
+        dict(name='span',attrs={'id':'more_comments_num_a'}),
+    ]
+
+    feeds = [
+        (u'Slashdot',
+            u'http://rss.slashdot.org/Slashdot/slashdot?m=5072'),
+        (u'/. IT',
+            u'http://rss.slashdot.org/Slashdot/slashdotIT'),
+        (u'/. Hardware',
+            u'http://rss.slashdot.org/Slashdot/slashdotHardware'),
+        (u'/. Linux',
+            u'http://rss.slashdot.org/Slashdot/slashdotLinux'),
+        (u'/. Your Rights Online',
+            u'http://rss.slashdot.org/Slashdot/slashdotYourRightsOnline')
+    ]
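For context on how the new Slashdot recipe's remove_tags entries take effect, here is a rough sketch (an assumption about the recipe framework, not calibre's actual code): each specification is handed to BeautifulSoup's findAll and the matching tags are removed from the parsed page before conversion.

    def apply_remove_tags(soup, remove_tags):
        # each spec, e.g. dict(name='div', attrs={'id':'commentwrap'}),
        # is passed straight through to findAll
        for spec in remove_tags:
            for tag in soup.findAll(**spec):
                tag.extract()
        return soup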
@@ -99,11 +99,11 @@ class pot(OptionlessCommand):
                 tempdir = tempfile.mkdtemp()
                 pygettext(buf, ['-k', '__', '-p', tempdir]+files)
                 src = buf.getvalue()
-                pot = os.path.join(tempdir, __appname__+'.pot')
+                pot = os.path.join(self.PATH, __appname__+'.pot')
                 f = open(pot, 'wb')
                 f.write(src)
                 f.close()
-                print 'Translations template:', pot
+                print 'Translations template:', os.path.abspath(pot)
                 return pot
             finally:
                 sys.path.remove(os.path.abspath(self.PATH))
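The effect of the two changed lines above: the .pot template is written into the translations directory (self.PATH) instead of the throwaway temporary directory, and the reported location is printed as an absolute path. A minimal sketch of that write step:

    import os

    def write_pot(dest_dir, appname, src):
        pot = os.path.join(dest_dir, appname + '.pot')
        f = open(pot, 'wb')
        f.write(src)
        f.close()
        print 'Translations template:', os.path.abspath(pot)
        return pot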
@@ -709,6 +709,7 @@ class upload(OptionlessCommand):
     description = 'Build and upload calibre to the servers'

     sub_commands = [
+                     ('pot', None),
                      ('stage1', None),
                      ('stage2', None),
                      ('stage3', None)