Merge from trunk
commit 0894a616b8

resources/images/news/journalgazette.png (new binary file, 414 B; not shown)

resources/recipes/journalgazette.recipe (new file, 64 lines)
@@ -0,0 +1,64 @@
#!/usr/bin/env python

__license__   = 'GPL v3'
__author__    = 'somedayson & TonytheBookworm, revised by Cynthia Clavey'
__copyright__ = '2010, Cynthia Clavey cynvision@yahoo.com'
__version__   = '1.02'
__date__      = '05, september 2010'
__docformat__ = 'restructuredtext en'

from calibre.web.feeds.recipes import BasicNewsRecipe

class AdvancedUserRecipe1283666183(BasicNewsRecipe):
    title = u'Journal Gazette Ft. Wayne IN'
    __author__ = 'cynvision'
    oldest_article = 1
    max_articles_per_feed = 8
    no_stylesheets = True
    remove_javascript = True
    use_embedded_content = False
    keep_only_tags = [dict(name='div', attrs={'id':'mainContent'})]
    extra_css = '#copyinfo { font-size: 6 ;} \n #photocredit { font-size: 6 ;} \n .pubinfo { font-size: 6 ;}'
    masthead_url = 'http://www.journalgazette.net/img/icons/jgmini.gif'
    # cover_url = 'http://www.journalgazette.net/img/icons/jgmini.gif'
    encoding = 'cp1252'

    feeds = [(u'Opinion', u'http://journalgazette.net/apps/pbcs.dll/section?Category=EDIT&template=blogrss&mime=xml'),
             (u'Local News', u'http://journalgazette.net/apps/pbcs.dll/section?Category=LOCAL&template=blogrss&mime=xml'),
             (u'Sports', u'http://journalgazette.net/apps/pbcs.dll/section?Category=SPORTS&template=blogrss&mime=xml'),
             (u'Features', u'http://journalgazette.net/apps/pbcs.dll/section?Category=FEAT&template=blogrss&mime=xml'),
             (u'Business', u'http://journalgazette.net/apps/pbcs.dll/section?Category=BIZ&template=blogrss&mime=xml'),
             (u'Ice Chips', u'http://journalgazette.net/apps/pbcs.dll/section?Category=BLOGS11&template=blogrss&mime=xml '),
             (u'Entertainment', u'http://journalgazette.net/apps/pbcs.dll/section?Category=ENT&template=blogrss&mime=xml'),
             (u'Food', u'http://journalgazette.net/apps/pbcs.dll/section?Category=FOOD&template=blogrss&mime=xml')
            ]

    def print_version(self, url):
        split1 = url.split("/")
        #print 'THE SPLIT IS: ', split1
        #url1 = split1[0]
        #url2 = split1[1]
        url3 = split1[2]
        #url4 = split1[3]
        url5 = split1[4]
        url6 = split1[5]
        url7 = split1[6]
        #url8 = split1[7]

        #need to convert to print_version
        #originalversion is : http://www.journalgazette.net/article/20100905/EDIT10/309059959/1021/EDIT
        #printversion should be: http://www.journalgazette.net/apps/pbcs.dll/article?AID=/20100905/EDIT10/309059959/-1/EDIT01&template=printart
        #results of the split
        #THE SPLIT IS: [u'http:', u'', u'www.journalgazette.net', u'article', u'20100905', u'EDIT10', u'309059959', u'1021', u'EDIT']

        print_url = 'http://' + url3 + '/apps/pbcs.dll/article?AID=/' + url5 + '/' + url6 + '/' + url7 + '/-1/EDIT01&template=printart'
        #print 'THIS URL WILL PRINT: ', print_url # this is a test string to see what the url is it will return
        return print_url

    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        return soup
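
Note: the print_version logic above only reshuffles pieces of the article URL. A standalone sketch of the same rewrite, using the example URL from the recipe's own comments (to_print_url is a made-up helper name, not part of the recipe):

def to_print_url(url):
    # split gives: ['http:', '', 'www.journalgazette.net', 'article',
    #               '20100905', 'EDIT10', '309059959', '1021', 'EDIT']
    parts = url.split('/')
    host, date, category, story_id = parts[2], parts[4], parts[5], parts[6]
    return ('http://' + host + '/apps/pbcs.dll/article?AID=/' + date +
            '/' + category + '/' + story_id + '/-1/EDIT01&template=printart')

# to_print_url('http://www.journalgazette.net/article/20100905/EDIT10/309059959/1021/EDIT')
# -> 'http://www.journalgazette.net/apps/pbcs.dll/article?AID=/20100905/EDIT10/309059959/-1/EDIT01&template=printart'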

resources/recipes/wsj_free.recipe (new file, 153 lines)
@@ -0,0 +1,153 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'

from calibre.web.feeds.news import BasicNewsRecipe
import copy

class WallStreetJournal(BasicNewsRecipe):

    title = 'Wall Street Journal (free)'
    __author__ = 'Kovid Goyal, Sujata Raman, Joshua Oster-Morris, Starson17'
    description = 'News and current affairs'
    language = 'en'
    cover_url = 'http://dealbreaker.com/images/thumbs/Wall%20Street%20Journal%20A1.JPG'
    max_articles_per_feed = 1000
    timefmt = ' [%a, %b %d, %Y]'
    no_stylesheets = True

    extra_css = '''h1{color:#093D72 ; font-size:large ; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; }
                h2{color:#474537; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
                .subhead{color:gray; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small; font-style:italic;}
                .insettipUnit {color:#666666; font-family:Arial,Sans-serif;font-size:xx-small }
                .targetCaption{ font-size:x-small; color:#333333; font-family:Arial,Helvetica,sans-serif}
                .article{font-family :Arial,Helvetica,sans-serif; font-size:x-small}
                .tagline {color:#333333; font-size:xx-small}
                .dateStamp {color:#666666; font-family:Arial,Helvetica,sans-serif}
                h3{color:blue ;font-family:Arial,Helvetica,sans-serif; font-size:xx-small}
                .byline{color:blue;font-family:Arial,Helvetica,sans-serif; font-size:xx-small}
                h6{color:#333333; font-family:Georgia,"Century Schoolbook","Times New Roman",Times,serif; font-size:small;font-style:italic; }
                .paperLocation{color:#666666; font-size:xx-small}'''

    remove_tags_before = dict(name='h1')
    remove_tags = [
        dict(id=["articleTabs_tab_article", "articleTabs_tab_comments", "articleTabs_tab_interactive","articleTabs_tab_video","articleTabs_tab_map","articleTabs_tab_slideshow"]),
        {'class':['footer_columns','network','insetCol3wide','interactive','video','slideshow','map','insettip','insetClose','more_in', "insetContent", 'articleTools_bottom', 'aTools', "tooltip", "adSummary", "nav-inline"]},
        dict(name='div', attrs={'data-flash-settings':True}),
        {'class':['insetContent embedType-interactive insetCol3wide','insetCol6wide','insettipUnit']},
        dict(rel='shortcut icon'),
    ]
    remove_tags_after = [dict(id="article_story_body"), {'class':"article story"},]

    def postprocess_html(self, soup, first):
        for tag in soup.findAll(name=['table', 'tr', 'td']):
            tag.name = 'div'

        for tag in soup.findAll('div', dict(id=["articleThumbnail_1", "articleThumbnail_2", "articleThumbnail_3", "articleThumbnail_4", "articleThumbnail_5", "articleThumbnail_6", "articleThumbnail_7"])):
            tag.extract()

        return soup

    def wsj_get_index(self):
        return self.index_to_soup('http://online.wsj.com/itp')

    def wsj_add_feed(self, feeds, title, url):
        self.log('Found section:', title)
        if url.endswith('whatsnews'):
            articles = self.wsj_find_wn_articles(url)
        else:
            articles = self.wsj_find_articles(url)
        if articles:
            feeds.append((title, articles))
        return feeds

    def parse_index(self):
        soup = self.wsj_get_index()

        date = soup.find('span', attrs={'class':'date-date'})
        if date is not None:
            self.timefmt = ' [%s]'%self.tag_to_string(date)

        feeds = []
        div = soup.find('div', attrs={'class':'itpHeader'})
        div = div.find('ul', attrs={'class':'tab'})
        for a in div.findAll('a', href=lambda x: x and '/itp/' in x):
            pageone = a['href'].endswith('pageone')
            if pageone:
                title = 'Front Section'
                url = 'http://online.wsj.com' + a['href']
                feeds = self.wsj_add_feed(feeds, title, url)
                title = "What's News"
                url = url.replace('pageone', 'whatsnews')
                feeds = self.wsj_add_feed(feeds, title, url)
            else:
                title = self.tag_to_string(a)
                url = 'http://online.wsj.com' + a['href']
                feeds = self.wsj_add_feed(feeds, title, url)
        return feeds

    def wsj_find_wn_articles(self, url):
        soup = self.index_to_soup(url)
        articles = []

        whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
        if whats_news is not None:
            for a in whats_news.findAll('a', href=lambda x: x and '/article/' in x):
                container = a.findParent(['p'])
                meta = a.find(attrs={'class':'meta_sectionName'})
                if meta is not None:
                    meta.extract()
                title = self.tag_to_string(a).strip()
                url = a['href']
                desc = ''
                if container is not None:
                    desc = self.tag_to_string(container)

                articles.append({'title':title, 'url':url,
                                 'description':desc, 'date':''})

                self.log('\tFound WN article:', title)

        return articles

    def wsj_find_articles(self, url):
        soup = self.index_to_soup(url)

        whats_news = soup.find('div', attrs={'class':lambda x: x and 'whatsNews-simple' in x})
        if whats_news is not None:
            whats_news.extract()

        articles = []

        flavorarea = soup.find('div', attrs={'class':lambda x: x and 'ahed' in x})
        if flavorarea is not None:
            flavorstory = flavorarea.find('a', href=lambda x: x and x.startswith('/article'))
            if flavorstory is not None:
                flavorstory['class'] = 'mjLinkItem'
                metapage = soup.find('span', attrs={'class':lambda x: x and 'meta_sectionName' in x})
                if metapage is not None:
                    flavorstory.append( copy.copy(metapage) ) #metapage should always be A1 because that should be first on the page

        for a in soup.findAll('a', attrs={'class':'mjLinkItem'}, href=True):
            container = a.findParent(['li', 'div'])
            meta = a.find(attrs={'class':'meta_sectionName'})
            if meta is not None:
                meta.extract()
            title = self.tag_to_string(a).strip() + ' [%s]'%self.tag_to_string(meta)
            url = 'http://online.wsj.com'+a['href']
            desc = ''
            p = container.find('p')
            if p is not None:
                desc = self.tag_to_string(p)

            articles.append({'title':title, 'url':url,
                             'description':desc, 'date':''})

            self.log('\tFound article:', title)

        return articles

    def cleanup(self):
        self.browser.open('http://online.wsj.com/logout?url=http://online.wsj.com')
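
Note: wsj_add_feed and the two wsj_find_* helpers above assemble the value parse_index returns: a list of (section title, article list) pairs, where each article is a dict. A minimal sketch of that shape, with placeholder title, URL and description (not real data):

feeds = [
    ('Front Section', [
        {'title': 'Example headline [U.S.]',   # link text plus the [section] suffix
         'url': 'http://online.wsj.com/article/PLACEHOLDER.html',
         'description': 'First paragraph of the story, used as the summary.',
         'date': ''},
    ]),
]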
@@ -159,7 +159,7 @@ class DeleteAction(InterfaceAction):
         if self.gui.stack.currentIndex() == 0:
             if not confirm('<p>'+_('The selected books will be '
                            '<b>permanently deleted</b> and the files '
-                           'removed from your computer. Are you sure?')
+                           'removed from your calibre library. Are you sure?')
                         +'</p>', 'library_delete_books', self.gui):
                 return
             ci = view.currentIndex()
@@ -51,7 +51,8 @@ class EditMetadataAction(InterfaceAction):
                 self.merge_books)
         mb.addSeparator()
         mb.addAction(_('Merge into first selected book - keep others'),
-                partial(self.merge_books, safe_merge=True))
+                partial(self.merge_books, safe_merge=True),
+                Qt.AltModifier+Qt.Key_M)
         self.merge_menu = mb
         self.action_merge.setMenu(mb)
         md.addSeparator()
@@ -125,7 +125,7 @@ class Browser(QScrollArea): # {{{
         self.close_button.clicked.connect(self.close_requested)
         self._osxl.addStretch(10)
         self._osxl.addWidget(self.close_button)
-        self._osxl.addStretch(10)
+        #self._osxl.addStretch(10)
         self._layout.addLayout(self._osxl)
 
         for name, plugins in self.category_map.items():
@@ -522,13 +522,12 @@ class Main(MainWindow, MainWindowMixin, DeviceMixin, # {{{
 
 
     def shutdown(self, write_settings=True):
-        l = getattr(self, 'library_view', None)
-        if l:
-            l = getattr(l, 'model', None);
-            if l:
-                l = l().db
-                if l:
-                    l.clean()
+        try:
+            cf = self.library_view.model().db.clean
+        except:
+            pass
+        else:
+            cf()
         for action in self.iactions.values():
             if not action.shutting_down():
                 return
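
Note: the shutdown() hunk above trades step-by-step getattr guards for a try/except around the whole attribute chain. A standalone sketch contrasting the two styles, using a stand-in object rather than calibre's Main window (illustrative only):

class _Stub(object):
    pass  # deliberately has no library_view attribute

obj = _Stub()

# guarded style: test each link of the chain before following it
l = getattr(obj, 'library_view', None)
if l:
    m = getattr(l, 'model', None)
    if m:
        m().db.clean()

# try/except style: attempt the whole chain and ignore any failure
try:
    cf = obj.library_view.model().db.clean
except Exception:
    pass
else:
    cf()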
@@ -345,6 +345,8 @@ Calibre has several keyboard shortcuts to save you time and mouse movement. Thes
       - Show book details
     * - :kbd:`M`
       - Merge selected records
+    * - :kbd:`Alt+M`
+      - Merge selected records, keeping originals
     * - :kbd:`O`
       - Open containing folder
     * - :kbd:`S`