Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-09 03:04:10 -04:00)

Commit d68e488e8b: Pull from trunk
@@ -144,7 +144,7 @@ CONFIG += x86 ppc
             return _build_ext.build_extension(self, ext)
 
         c_sources = [f for f in ext.sources if os.path.splitext(f)[1].lower() in ('.c', '.cpp', '.cxx')]
-        compile_args = '/c /nologo /Ox /MD /W3 /GX /DNDEBUG'.split()
+        compile_args = '/c /nologo /Ox /MD /W3 /EHsc /DNDEBUG'.split()
        compile_args += ext.extra_compile_args
        self.swig_opts = ''
        inc_dirs = self.include_dirs + [x.replace('/', '\\') for x in ext.include_dirs]
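Note: /GX is the deprecated MSVC spelling of /EHsc, which enables standard
C++ exception-handling semantics; newer cl.exe releases warn that the option
is deprecated. A minimal standalone sketch of the resulting argument list
(not calibre's code; `ext` here stands for any distutils Extension):

    # Build cl.exe arguments with the modern exception-handling flag.
    compile_args = '/c /nologo /Ox /MD /W3 /EHsc /DNDEBUG'.split()
    compile_args += list(getattr(ext, 'extra_compile_args', []) or [])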
@@ -153,11 +153,12 @@ CONFIG += x86 ppc
        for f in c_sources:
            o = os.path.join(bdir, os.path.basename(f)+'.obj')
            objects.append(o)
-           compiler = cc + ['/Tc'+f, '/Fo'+o]
+           inf = '/Tp' if f.endswith('.cpp') else '/Tc'
+           compiler = cc + [inf+f, '/Fo'+o]
            self.spawn(compiler)
        out = os.path.join(bdir, base+'.pyd')
        linker = [msvc.linker] + '/DLL /nologo /INCREMENTAL:NO'.split()
-       linker += ['/LIBPATH:'+x for x in self.library_dirs]
+       linker += ['/LIBPATH:'+x for x in self.library_dirs+ext.library_dirs]
        linker += [x+'.lib' for x in ext.libraries]
        linker += ['/EXPORT:init'+base] + objects + ['/OUT:'+out]
        self.spawn(linker)
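Note: cl.exe decides the source language from the flag, not the file name:
/Tc<file> forces C and /Tp<file> forces C++, so compiling a .cpp source with
a hard-coded /Tc was wrong. A hedged standalone sketch of the per-source
dispatch (the diff itself only special-cases '.cpp'; the '.cxx' test below is
an assumption):

    import os

    def msvc_source_flag(f):
        # /Tp = treat as C++, /Tc = treat as C
        return '/Tp' if f.endswith(('.cpp', '.cxx')) else '/Tc'

    def object_for(bdir, f):
        return os.path.join(bdir, os.path.basename(f) + '.obj')

    # e.g. compiler = cc + [msvc_source_flag(f) + f, '/Fo' + object_for(bdir, f)]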
3  setup.py
@@ -66,10 +66,9 @@ if __name__ == '__main__':
     podofo_lib = '/usr/lib' if islinux else r'C:\podofo' if iswindows else \
                 '/Users/kovid/podofo/lib'
     if os.path.exists(os.path.join(podofo_inc, 'podofo.h')):
-        eca = ['/EHsc'] if iswindows else []
         optional.append(Extension('calibre.plugins.podofo',
                         sources=['src/calibre/utils/podofo/podofo.cpp'],
-                        libraries=['podofo'], extra_compile_args=eca,
+                        libraries=['podofo'],
                         library_dirs=[os.environ.get('PODOFO_LIB_DIR', podofo_lib)],
                         include_dirs=\
                         [os.environ.get('PODOFO_INC_DIR', podofo_inc)]))
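Note: with /EHsc now applied globally to every Windows extension (first hunk
above), the per-extension extra_compile_args for podofo is redundant and is
dropped. A hedged sketch of the simplified declaration (podofo_inc is defined
above this hunk; the values shown here are assumptions):

    import os
    from distutils.core import Extension

    podofo_inc = '/usr/include/podofo'  # assumed; set per platform above
    podofo_lib = '/usr/lib'             # assumed; set per platform above

    podofo = Extension('calibre.plugins.podofo',
            sources=['src/calibre/utils/podofo/podofo.cpp'],
            libraries=['podofo'],
            library_dirs=[os.environ.get('PODOFO_LIB_DIR', podofo_lib)],
            include_dirs=[os.environ.get('PODOFO_INC_DIR', podofo_inc)])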
@@ -2,7 +2,7 @@ __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
 __docformat__ = 'restructuredtext en'
 __appname__ = 'calibre'
-__version__ = '0.5.11'
+__version__ = '0.5.12'
 __author__ = "Kovid Goyal <kovid@kovidgoyal.net>"
 '''
 Various run time constants.
@@ -508,6 +508,9 @@ class OPF(object):
             else:
                 self.path_to_html_toc, self.html_toc_fragment = \
                     toc.partition('#')[0], toc.partition('#')[-1]
+                if not os.access(self.path_to_html_toc, os.R_OK) or \
+                        not os.path.isfile(self.path_to_html_toc):
+                    self.path_to_html_toc = None
                 self.toc.read_html_toc(toc)
         except:
             pass
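Note: the three added lines keep a bad HTML TOC reference in the OPF from
raising inside the try block: the href may name a file that is missing or
unreadable, and read_html_toc would then fail on it. Standalone sketch of the
guard (function name and return shape are illustrative):

    import os

    def resolve_html_toc(toc):
        # toc is an href such as 'toc.html#start' (example value)
        path, _, fragment = toc.partition('#')
        if not os.access(path, os.R_OK) or not os.path.isfile(path):
            return None, fragment  # caller falls back gracefully
        return path, fragment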
@@ -403,7 +403,7 @@ class MetadataSingleDialog(ResizableDialog, Ui_MetadataSingleDialog):
             pix = QPixmap()
             pix.loadFromData(self.cover_fetcher.cover_data)
             if pix.isNull():
-                error_dialog(self.window, _('Bad cover'),
+                error_dialog(self, _('Bad cover'),
                     _('The cover is not a valid picture')).exec_()
             else:
                 self.cover.setPixmap(pix)
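Note: the parent of the error dialog changes from self.window to self, so the
message box is modal to (and centred over) the metadata dialog itself rather
than the main window. Minimal illustration of the pattern (PyQt4-era API, as
in the diff):

    # Inside a QDialog subclass: pass the dialog itself as parent.
    d = error_dialog(self, _('Bad cover'),
            _('The cover is not a valid picture'))
    d.exec_()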
File diffs suppressed because they are too large.

6052  src/calibre/translations/calibre.pot  (new file)

File diffs suppressed because they are too large.
@@ -2,7 +2,7 @@
 
 __license__ = 'GPL v3'
 __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
-import re, string, time
+import re, time
 from calibre import strftime
 from calibre.web.feeds.news import BasicNewsRecipe
 
@@ -13,62 +13,51 @@ class Newsweek(BasicNewsRecipe):
     description = 'Weekly news and current affairs in the US'
     no_stylesheets = True
     language = _('English')
 
     extra_css = '''
                   #content { font-size:normal; font-family: serif }
                   .story { font-size:normal }
                   .HorizontalHeader {font-size:xx-large}
                   .deck {font-size:x-large}
                   '''
-    keep_only_tags = [dict(name='div', id='content')]
 
     remove_tags = [
-            dict(name=['script', 'noscript']),
-            dict(name='div', attrs={'class':['ad', 'SocialLinks', 'SocialLinksDiv',
-                'channel', 'bot', 'nav', 'top',
-                'EmailArticleBlock',
-                'comments-and-social-links-wrapper',
-                'inline-social-links-wrapper',
-                'inline-social-links',
-                ]}),
-            dict(name='div', attrs={'class':re.compile('box')}),
-            dict(id=['ToolBox', 'EmailMain', 'EmailArticle', 'comment-box',
-                'nw-comments'])
-    ]
+        {'class':['navbar', 'ad', 'sponsorLinksArticle', 'mm-content',
+            'inline-social-links-wrapper', 'email-article',
+            'comments-and-social-links-wrapper', 'EmailArticleBlock']},
+        {'id' : ['footer', 'ticker-data', 'topTenVertical',
+            'digg-top-five', 'mesothorax', 'nw-comments',
+            'ToolBox', 'EmailMain']},
+        {'class': re.compile('related-cloud')},
+        ]
+    keep_only_tags = [{'class':['article HorizontalHeader', 'articlecontent']}]
 
     recursions = 1
     match_regexps = [r'http://www.newsweek.com/id/\S+/page/\d+']
 
+    def find_title(self, section):
+        d = {'scope':'Scope', 'thetake':'The Take', 'features':'Features',
+                None:'Departments'}
+        ans = None
+        a = section.find('a', attrs={'name':True})
+        if a is not None:
+            ans = a['name']
+        return d[ans]
 
-    def get_sections(self, soup):
-        sections = []
-
-        def process_section(img):
-            articles = []
-            match = re.search(r'label_([^_.]+)', img['src'])
-            if match is None:
-                return
-            title = match.group(1)
-            if title in ['coverstory', 'more', 'tipsheet']:
-                return
-            title = string.capwords(title)
+    def find_articles(self, section):
+        ans = []
+        for x in section.findAll('h5'):
+            title = ' '.join(x.findAll(text=True)).strip()
+            a = x.find('a')
+            if not a: continue
+            href = a['href']
+            ans.append({'title':title, 'url':href, 'description':'', 'date': strftime('%a, %d %b')})
+        if not ans:
+            for x in section.findAll('div', attrs={'class':'hdlItem'}):
+                a = x.find('a', href=True)
+                if not a : continue
+                title = ' '.join(a.findAll(text=True)).strip()
+                href = a['href']
+                if 'http://xtra.newsweek.com' in href: continue
+                ans.append({'title':title, 'url':href, 'description':'', 'date': strftime('%a, %d %b')})
 
-            for a in img.parent.findAll('a', href=True):
-                art, href = a.string, a['href']
-                if not re.search('\d+$', href) or not art or 'Preview Article' in art:
-                    continue
-                articles.append({
-                    'title':art, 'url':href, 'description':'',
-                    'content':'', 'date':''
-                    })
-            sections.append((title, articles))
-
-            img.parent.extract()
-
-        for img in soup.findAll(src=re.compile('/label_')):
-            process_section(img)
-
-        return sections
+        #for x in ans:
+        #    x['url'] += '/output/print'
+        return ans
 
 
     def parse_index(self):
@@ -83,49 +72,45 @@ class Newsweek(BasicNewsRecipe):
         self.timefmt = strftime(' [%d %b, %Y]', time.strptime(match.group(1), '%y%m%d'))
         self.cover_url = small.replace('coversmall', 'coverlarge')
 
-        sections = self.get_sections(soup)
-        sections.insert(0, ('Main articles', []))
-
-        for tag in soup.findAll('h5'):
-            a = tag.find('a', href=True)
-            if a is not None:
-                title = self.tag_to_string(a)
-                if not title:
-                    a = 'Untitled article'
-                art = {
-                       'title' : title,
-                       'url'   : a['href'],
-                       'description':'', 'content':'',
-                       'date': strftime('%a, %d %b')
-                       }
-                if art['title'] and art['url']:
-                    sections[0][1].append(art)
-        return sections
+        sections = soup.findAll('div', attrs={'class':'featurewell'})
+        titles = map(self.find_title, sections)
+        articles = map(self.find_articles, sections)
+        ans = list(zip(titles, articles))
+        def fcmp(x, y):
+            tx, ty = x[0], y[0]
+            if tx == "Features": return cmp(1, 2)
+            if ty == "Features": return cmp(2, 1)
+            return cmp(tx, ty)
+        return sorted(ans, cmp=fcmp)
 
     def postprocess_html(self, soup, first_fetch):
-        divs = list(soup.findAll('div', 'pagination'))
-        if not divs:
-            return
-        divs[0].extract()
-        if len(divs) > 1:
-            soup.find('body')['style'] = 'page-break-after:avoid'
-            divs[1].extract()
-
-        h1 = soup.find('h1')
         if not first_fetch:
+            h1 = soup.find(id='headline')
             if h1:
                 h1.extract()
-            ai = soup.find('div', 'articleInfo')
-            ai.extract()
-        else:
-            soup.find('body')['style'] = 'page-break-before:always; page-break-after:avoid;'
+            div = soup.find(attrs={'class':'articleInfo'})
+            if div:
+                div.extract()
+        divs = list(soup.findAll('div', 'pagination'))
+        if not divs:
+            return soup
+        for div in divs[1:]: div.extract()
+        all_a = divs[0].findAll('a', href=True)
+        divs[0]['style']="display:none"
+        if len(all_a) > 1:
+            all_a[-1].extract()
+        test = re.compile(self.match_regexps[0])
+        for a in soup.findAll('a', href=test):
+            if a not in all_a:
+                del a['href']
         return soup
 
     def get_current_issue(self):
-        #from urllib2 import urlopen # For some reason mechanize fails
-        #home = urlopen('http://www.newsweek.com').read()
-        soup = self.index_to_soup('http://www.newsweek.com')#BeautifulSoup(home)
-        img = soup.find('img', alt='Current Magazine')
-        if img and img.parent.has_key('href'):
-            return self.index_to_soup(img.parent['href'])
+        soup = self.index_to_soup('http://www.newsweek.com')
+        div = soup.find('div', attrs={'class':re.compile('more-from-mag')})
+        if div is None: return None
+        a = div.find('a')
+        if a is not None:
+            href = a['href'].split('#')[0]
+            return self.index_to_soup(href)
 
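Note: in the rewritten parse_index, fcmp returns a negative value for
'Features' (cmp(1, 2) == -1), so that section always sorts first and the rest
sort alphabetically. cmp() and sorted(cmp=...) are Python 2 only; a key-based
equivalent (sketch, for comparison only) would be:

    # False sorts before True, so ('Features', ...) comes first.
    sections = [('Scope', []), ('Departments', []), ('Features', [])]
    sections = sorted(sections, key=lambda s: (s[0] != 'Features', s[0]))
    # -> [('Features', []), ('Departments', []), ('Scope', [])]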
39  src/calibre/web/feeds/recipes/recipe_slashdot.py  (new file)

@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+from __future__ import with_statement
+
+__license__ = 'GPL v3'
+__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
+__docformat__ = 'restructuredtext en'
+
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class Slashdot(BasicNewsRecipe):
+    title = u'Slashdot.org'
+    oldest_article = 7
+    max_articles_per_feed = 100
+    language = _('English')
+    __author__ = 'floweros'
+    no_stylesheets = True
+    keep_only_tags = [dict(name='div',attrs={'id':'article'})]
+    remove_tags = [
+        dict(name='div',attrs={'id':'userlogin-title'}),
+        dict(name='div',attrs={'id':'userlogin-content'}),
+        dict(name='div',attrs={'id':'commentwrap'}),
+        dict(name='span',attrs={'id':'more_comments_num_a'}),
+    ]
+
+    feeds = [
+        (u'Slashdot',
+         u'http://rss.slashdot.org/Slashdot/slashdot?m=5072'),
+        (u'/. IT',
+         u'http://rss.slashdot.org/Slashdot/slashdotIT'),
+        (u'/. Hardware',
+         u'http://rss.slashdot.org/Slashdot/slashdotHardware'),
+        (u'/. Linux',
+         u'http://rss.slashdot.org/Slashdot/slashdotLinux'),
+        (u'/. Your Rights Online',
+         u'http://rss.slashdot.org/Slashdot/slashdotYourRightsOnline')
+    ]
+
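Note: a recipe like the new Slashdot one is driven almost entirely by
declarations: feeds is a list of (title, RSS-url) tuples, and
keep_only_tags/remove_tags are BeautifulSoup-style matchers applied to each
fetched article page. A standalone sketch of what one remove_tags entry does
(BeautifulSoup 3, the parser calibre shipped at the time):

    from BeautifulSoup import BeautifulSoup

    html = '<div id="article">story</div><div id="commentwrap">...</div>'
    soup = BeautifulSoup(html)
    # Equivalent of dict(name='div', attrs={'id':'commentwrap'}):
    for tag in soup.findAll('div', attrs={'id': 'commentwrap'}):
        tag.extract()  # prune matched tags from the article tree
    print soup         # -> <div id="article">story</div>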
@@ -99,11 +99,11 @@ class pot(OptionlessCommand):
             tempdir = tempfile.mkdtemp()
             pygettext(buf, ['-k', '__', '-p', tempdir]+files)
             src = buf.getvalue()
-            pot = os.path.join(tempdir, __appname__+'.pot')
+            pot = os.path.join(self.PATH, __appname__+'.pot')
             f = open(pot, 'wb')
             f.write(src)
             f.close()
-            print 'Translations template:', pot
+            print 'Translations template:', os.path.abspath(pot)
             return pot
         finally:
             sys.path.remove(os.path.abspath(self.PATH))
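Note: both changes write the generated gettext template into self.PATH (the
translations directory) instead of a throwaway tempdir, which is what lets the
6052-line calibre.pot added by this commit live in the tree; the print now
shows an absolute path. Sketch of the effect (the directory value below is an
assumption):

    import os

    PATH = os.path.join('src', 'calibre', 'translations')  # assumed value
    pot = os.path.join(PATH, 'calibre' + '.pot')
    print 'Translations template:', os.path.abspath(pot)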
@@ -709,6 +709,7 @@ class upload(OptionlessCommand):
     description = 'Build and upload calibre to the servers'
 
     sub_commands = [
+                     ('pot', None),
                      ('stage1', None),
                      ('stage2', None),
                      ('stage3', None)