KG updates pre 0.7.14

GRiker 2010-08-10 11:23:24 -07:00
commit 5ec9b55bcf
24 changed files with 3299 additions and 3228 deletions

Binary image changed (not shown); new size 356 B.

Binary image changed (not shown); new size 464 B.

View File

@@ -0,0 +1,64 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.la-razon.com
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class LaRazon_Bol(BasicNewsRecipe):
title = 'La Razón - Bolivia'
__author__ = 'Darko Miletic'
description = 'El diario nacional de Bolivia'
publisher = 'Praxsis S.R.L.'
category = 'news, politics, Bolivia'
oldest_article = 1
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'cp1252'
use_embedded_content = False
language = 'es'
publication_type = 'newspaper'
delay = 1
remove_empty_feeds = True
cover_url = strftime('http://www.la-razon.com/portadas/%Y%m%d_LaRazon.jpg')
masthead_url = 'http://www.la-razon.com/imagenes/logo.jpg'
extra_css = """ body{font-family: Arial,Helvetica,sans-serif }
img{margin-bottom: 0.4em}
.noticia-titulo{font-family: Georgia,"Times New Roman",Times,serif}
.lead{font-weight: bold; font-size: 0.8em}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
keep_only_tags = [dict(name='div', attrs={'class':['noticia-titulo','noticia-desarrollo']})]
remove_tags = [dict(name=['meta','link','form','iframe','embed','object'])]
remove_attributes = ['width','height']
feeds = [
(u'Editorial' , u'http://www.la-razon.com/rss_editorial.php' )
,(u'Opinión' , u'http://www.la-razon.com/rss_opinion.php' )
,(u'Nacional' , u'http://www.la-razon.com/rss_nacional.php' )
,(u'Economia' , u'http://www.la-razon.com/rss_economia.php' )
,(u'Ciudades' , u'http://www.la-razon.com/rss_ciudades.php' )
,(u'Sociedad' , u'http://www.la-razon.com/rss_sociedad.php' )
,(u'Mundo' , u'http://www.la-razon.com/rss_mundo.php' )
,(u'La Revista' , u'http://www.la-razon.com/rss_larevista.php' )
,(u'Sociales' , u'http://www.la-razon.com/rss_sociales.php' )
,(u'Mia' , u'http://www.la-razon.com/rss_mia.php' )
,(u'Marcas' , u'http://www.la-razon.com/rss_marcas.php' )
,(u'Escape' , u'http://www.la-razon.com/rss_escape.php' )
,(u'El Financiero' , u'http://www.la-razon.com/rss_financiero.php')
,(u'Tendencias' , u'http://www.la-razon.com/rss_tendencias.php')
]
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
return soup
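
Note: cover_url above is computed once, when the class body executes; calibre's strftime behaves like the standard library's, with the same format codes. A minimal sketch of how the dated URL resolves, using only the standard library (the printed date depends on when it runs):

import time

# Same URL template as the recipe's cover_url; %Y%m%d expands at run time.
template = 'http://www.la-razon.com/portadas/%Y%m%d_LaRazon.jpg'
print(time.strftime(template))
# e.g. http://www.la-razon.com/portadas/20100810_LaRazon.jpg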

View File

@@ -0,0 +1,63 @@
__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.lostiempos.com
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
class LosTiempos_Bol(BasicNewsRecipe):
title = 'Los Tiempos - Bolivia'
__author__ = 'Darko Miletic'
description = 'El periódico de mayor circulación en la ciudad de Cochabamba, Bolivia'
publisher = 'Los Tiempos'
category = 'news, politics, Bolivia'
oldest_article = 1
max_articles_per_feed = 200
no_stylesheets = True
encoding = 'cp1252'
use_embedded_content = False
language = 'es'
publication_type = 'newspaper'
delay = 1
remove_empty_feeds = True
cover_url = strftime('http://www.lostiempos.com/media_recortes/%Y/%m/%d/portada_md_1.jpg')
masthead_url = 'http://www.lostiempos.com/img_stat/logo_tiempos_sin_beta.jpg'
extra_css = """ body{font-family: Arial,Helvetica,sans-serif }
img{margin-bottom: 0.4em}
h1,.hora,.breadcum,.pie_foto{font-family: Georgia,"Times New Roman",Times,serif}
.hora,.breadcum,.pie_foto{font-size: small}
.en_gris,.pie_foto{color: #666666}
"""
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
}
keep_only_tags = [dict(name='div', attrs={'id':'articulo'})]
remove_tags = [
dict(name=['meta','link','form','iframe','embed','object','hr'])
,dict(attrs={'class':['caja_fonts sin_border_bot','pub']})
]
remove_attributes = ['width','height']
feeds = [
(u'Nacional' , u'http://www.lostiempos.com/rss/lostiempos-nacional.xml' )
,(u'Local' , u'http://www.lostiempos.com/rss/lostiempos-local.xml' )
,(u'Deportes' , u'http://www.lostiempos.com/rss/lostiempos-deportes.xml' )
,(u'Economía' , u'http://www.lostiempos.com/rss/lostiempos-economia.xml' )
,(u'Internacional' , u'http://www.lostiempos.com/rss/lostiempos-internacional.xml' )
,(u'Vida y Futuro' , u'http://www.lostiempos.com/rss/lostiempos-vida-y-futuro.xml' )
,(u'Tragaluz' , u'http://www.lostiempos.com/rss/lostiempos-tragaluz.xml' )
,(u'Opiniones' , u'http://www.lostiempos.com/rss/lostiempos-opiniones.xml' )
]
def preprocess_html(self, soup):
for item in soup.findAll(style=True):
del item['style']
return soup

View File

@@ -14,33 +14,39 @@ class Nspm(BasicNewsRecipe):
description = 'Casopis za politicku teoriju i drustvena istrazivanja'
publisher = 'NSPM'
category = 'news, politics, Serbia'
oldest_article = 2
oldest_article = 7
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
INDEX = 'http://www.nspm.rs/?alphabet=l'
encoding = 'utf-8'
language = 'sr'
delay = 2
publication_type = 'magazine'
masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
extra_css = ' @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)} @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)} body{font-family: "Times New Roman", serif1, serif} .article_description{font-family: Arial, sans1, sans-serif} img{margin-top:0.5em; margin-bottom: 0.7em} .author{color: #990000; font-weight: bold} .author,.createdate{font-size: 0.9em} img{margin-top:0.5em; margin-bottom: 0.7em} '
extra_css = """ @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: "Times New Roman", serif1, serif}
.article_description{font-family: Arial, sans1, sans-serif}
img{margin-top:0.5em; margin-bottom: 0.7em}
.author{color: #990000; font-weight: bold}
.author,.createdate{font-size: 0.9em} """
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
, 'linearize_tables' : True
}
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
keep_only_tags = [dict(attrs={'id':'jsn-mainbody'})]
remove_tags = [
dict(name=['link','object','embed','script','meta'])
,dict(name='td', attrs={'class':'buttonheading'})
]
keep_only_tags = [
dict(attrs={'class':['contentpagetitle','author','createdate']})
,dict(name='p')
dict(name=['link','object','embed','script','meta','base','iframe'])
,dict(attrs={'class':'buttonheading'})
]
remove_tags_after = dict(attrs={'class':'article_separator'})
remove_attributes = ['width','height']
def get_browser(self):
@@ -48,25 +54,18 @@ class Nspm(BasicNewsRecipe):
br.open(self.INDEX)
return br
feeds = [(u'Nova srpska politicka misao', u'http://www.nspm.rs/feed/rss.html')]
def print_version(self, url):
return url.replace('.html','/stampa.html')
feeds = [
(u'Rubrike' , u'http://www.nspm.rs/rubrike/feed/rss.html')
,(u'Debate' , u'http://www.nspm.rs/debate/feed/rss.html')
,(u'Reci i misli' , u'http://www.nspm.rs/reci-i-misli/feed/rss.html')
,(u'Samo smeh srbina spasava', u'http://www.nspm.rs/samo-smeh-srbina-spasava/feed/rss.html')
,(u'Polemike' , u'http://www.nspm.rs/polemike/feed/rss.html')
,(u'Prikazi' , u'http://www.nspm.rs/prikazi/feed/rss.html')
,(u'Prenosimo' , u'http://www.nspm.rs/prenosimo/feed/rss.html')
,(u'Hronika' , u'http://www.nspm.rs/tabela/hronika/feed/rss.html')
]
def preprocess_html(self, soup):
for item in soup.body.findAll(style=True):
del item['style']
att = soup.find('a',attrs={'class':'contentpagetitle'})
if att:
att.name = 'h1'
del att['href']
att2 = soup.find('td')
if att2:
att2.name = 'p'
del att2['valign']
for pt in soup.findAll('img'):
brtag = Tag(soup,'br')
brtag2 = Tag(soup,'br')
pt.append(brtag)
pt.append(brtag2)
return soup
return self.adeify_images(soup)
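
The print_version hook added here rewrites each article URL to its printer-friendly page before download, and adeify_images is the stock BasicNewsRecipe helper that massages img tags for Adobe Digital Editions. A standalone check of the URL rewrite (the article URL is made up):

# Same rewrite rule as Nspm.print_version above.
def print_version(url):
    return url.replace('.html', '/stampa.html')

print(print_version('http://www.nspm.rs/rubrike/example.html'))
# http://www.nspm.rs/rubrike/example/stampa.html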

View File

@@ -1,7 +1,5 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Darko Miletic <darko.miletic at gmail.com>'
__copyright__ = '2008-2010, Darko Miletic <darko.miletic at gmail.com>'
'''
nspm.rs/nspm-in-english
'''
@@ -12,28 +10,43 @@ class Nspm_int(BasicNewsRecipe):
title = 'NSPM in English'
__author__ = 'Darko Miletic'
description = 'Magazine dedicated to political theory and sociological research'
oldest_article = 20
publisher = 'NSPM'
category = 'news, politics, Serbia'
oldest_article = 7
max_articles_per_feed = 100
language = 'en'
no_stylesheets = True
use_embedded_content = False
INDEX = 'http://www.nspm.rs/?alphabet=l'
cover_url = 'http://nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
html2lrf_options = [
'--comment', description
, '--base-font-size', '10'
, '--category', 'news, politics, Serbia, english'
, '--publisher', 'IIC NSPM'
encoding = 'utf-8'
language = 'en'
delay = 2
publication_type = 'magazine'
masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
extra_css = """
body{font-family: "Times New Roman", serif}
.article_description{font-family: Arial, sans-serif}
img{margin-top:0.5em; margin-bottom: 0.7em}
.author{color: #990000; font-weight: bold}
.author,.createdate{font-size: 0.9em} """
conversion_options = {
'comment' : description
, 'tags' : category
, 'publisher' : publisher
, 'language' : language
, 'linearize_tables' : True
}
keep_only_tags = [dict(attrs={'id':'jsn-mainbody'})]
remove_tags = [
dict(name=['link','object','embed','script','meta','base','iframe'])
,dict(attrs={'class':'buttonheading'})
]
remove_tags_after = dict(attrs={'class':'article_separator'})
remove_attributes = ['width','height']
def get_browser(self):
br = BasicNewsRecipe.get_browser()
br.open(self.INDEX)
return br
feeds = [(u'Articles', u'http://www.nspm.rs/nspm-in-english/feed/rss.html')]
keep_only_tags = [dict(name='div', attrs={'id':'jsn-mainbody'})]
remove_tags = [dict(name='div', attrs={'id':'yvComment' })]
feeds = [ (u'NSPM in English', u'http://nspm.rs/nspm-in-english/feed/rss.html')]
def preprocess_html(self, soup):
for item in soup.body.findAll(style=True):
del item['style']
return self.adeify_images(soup)

View File

@@ -0,0 +1,49 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class Skeptic(BasicNewsRecipe):
title = u'The Skeptic'
description = 'Discussions with leading experts and investigation of fringe science and paranormal claims.'
language = 'en'
__author__ = 'Starson17'
oldest_article = 31
cover_url = 'http://www.skeptricks.com/images/Skeptic_Magazine.jpg'
remove_empty_feeds = True
remove_javascript = True
max_articles_per_feed = 50
no_stylesheets = True
remove_tags = [dict(name='div', attrs={'class':['Introduction','divider']}),
dict(name='div', attrs={'id':['feature', 'podcast']}),
dict(name='div', attrs={'id':re.compile(r'follow.*', re.DOTALL|re.IGNORECASE)}),
dict(name='hr'),
]
feeds = [
('The Skeptic', 'http://www.skeptic.com/feed'),
('E-Skeptic', 'http://www.skeptic.com/eskeptic'),
('All-SkepticBlog', 'http://skepticblog.org/feed'),
('Brian Dunning', 'http://skepticblog.org/author/dunning/feed/'),
('Daniel Loxton', 'http://skepticblog.org/author/loxton/feed/'),
('Kirsten Sanford', 'http://skepticblog.org/author/sanford/feed/'),
('Mark Edward', 'http://skepticblog.org/author/edward/feed/'),
('Michael Shermer', 'http://skepticblog.org/author/shermer/feed/'),
('Phil Plait', 'http://skepticblog.org/author/plait/feed/'),
('Ryan Johnson', 'http://skepticblog.org/author/johnson/feed/'),
('Steven Novella', 'http://skepticblog.org/author/novella/feed/'),
('Yau-Man Chan', 'http://skepticblog.org/author/chan/feed/'),
]
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
br.addheaders = [('Accept', 'text/html')]
return br
extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
'''
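
A caveat on the get_browser override above: assigning a new list to addheaders replaces every default header on the mechanize browser, including the User-Agent that BasicNewsRecipe.get_browser sets up. If the Accept header is wanted on top of the defaults, appending preserves them; a sketch under that assumption:

def get_browser(self):
    br = BasicNewsRecipe.get_browser(self)
    # Append rather than assign, so the default User-Agent survives.
    br.addheaders.append(('Accept', 'text/html'))
    return br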

View File

@@ -0,0 +1,50 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class TheSkepticalInquirer(BasicNewsRecipe):
title = u'The Skeptical Inquirer'
description = 'Investigation of fringe science and paranormal claims.'
language = 'en'
__author__ = 'Starson17'
oldest_article = 31
cover_url = 'http://www.skeptricks.com/images/Skeptical_Inquirer_Magazine.jpg'
remove_empty_feeds = True
remove_javascript = True
max_articles_per_feed = 50
no_stylesheets = True
keep_only_tags = [dict(name='div', attrs={'id':['content', 'bio']})]
remove_tags = [
dict(name='div', attrs={'id':['socialMedia']}),
]
preprocess_regexps = [
(re.compile(r'\.\(JavaScript must be enabled to view this email address\)', re.DOTALL|re.IGNORECASE), lambda match: ''),
]
def parse_index(self):
feeds = []
for title, url in [("The Skeptical Inquirer", "http://www.csicop.org")]:
articles = self.make_links(url)
if articles:
feeds.append((title, articles))
return feeds
def make_links(self, url):
soup = self.index_to_soup(url)
title = ''
current_articles = []
for item in soup.findAll(attrs={'class':['article-single bigger']}):
page_url = url + str(item.a["href"])
title = str(item.a.string)
current_articles.append({'title': title, 'url': page_url, 'description':'', 'date':''})
return current_articles
extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
'''
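
For context, parse_index must return a list of (section title, article list) pairs, each article being a dict of the shape make_links builds above. Illustrative values only, not real articles:

# Shape of the value parse_index returns (sample data):
feeds = [
    ('The Skeptical Inquirer', [
        {'title': 'Example article',
         'url': 'http://www.csicop.org/example',
         'description': '',
         'date': ''},
    ]),
]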

View File

@@ -1,314 +0,0 @@
#!/usr/bin/env python
__license__ = 'GPL v3'
'''
online.wsj.com
'''
import re
from calibre.web.feeds.recipes import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag, NavigableString
from datetime import timedelta, date
class WSJ(BasicNewsRecipe):
# formatting adapted from original recipe by Kovid Goyal and Sujata Raman
title = u'Wall Street Journal (free)'
__author__ = 'Nick Redding'
language = 'en'
description = ('All the free content from the Wall Street Journal (business, financial and political news)')
no_stylesheets = True
timefmt = ' [%b %d]'
# customization notes: delete sections you are not interested in
# set omit_paid_content to False if you want the paid content article snippets
# set oldest_article to the maximum number of days back from today to include articles
sectionlist = [
['/home-page','Front Page'],
['/public/page/news-opinion-commentary.html','Commentary'],
['/public/page/news-global-world.html','World News'],
['/public/page/news-world-business.html','US News'],
['/public/page/news-business-us.html','Business'],
['/public/page/news-financial-markets-stock.html','Markets'],
['/public/page/news-tech-technology.html','Technology'],
['/public/page/news-personal-finance.html','Personal Finance'],
['/public/page/news-lifestyle-arts-entertainment.html','Life & Style'],
['/public/page/news-real-estate-homes.html','Real Estate'],
['/public/page/news-career-jobs.html','Careers'],
['/public/page/news-small-business-marketing.html','Small Business']
]
oldest_article = 2
omit_paid_content = True
extra_css = '''h1{font-size:large; font-family:Times,serif;}
h2{font-family:Times,serif; font-size:small; font-style:italic;}
.subhead{font-family:Times,serif; font-size:small; font-style:italic;}
.insettipUnit {font-family:Times,serif;font-size:xx-small;}
.targetCaption{font-size:x-small; font-family:Times,serif; font-style:italic; margin-top: 0.25em;}
.article{font-family:Times,serif; font-size:x-small;}
.tagline { font-size:xx-small;}
.dateStamp {font-family:Times,serif;}
h3{font-family:Times,serif; font-size:xx-small;}
.byline {font-family:Times,serif; font-size:xx-small; list-style-type: none;}
.metadataType-articleCredits {list-style-type: none;}
h6{font-family:Times,serif; font-size:small; font-style:italic;}
.paperLocation{font-size:xx-small;}'''
remove_tags_before = dict({'class':re.compile('^articleHeadlineBox')})
remove_tags = [ dict({'id':re.compile('^articleTabs_tab_')}),
#dict(id=["articleTabs_tab_article", "articleTabs_tab_comments",
# "articleTabs_tab_interactive","articleTabs_tab_video",
# "articleTabs_tab_map","articleTabs_tab_slideshow"]),
{'class': ['footer_columns','network','insetCol3wide','interactive','video','slideshow','map',
'insettip','insetClose','more_in', "insetContent",
# 'articleTools_bottom','articleTools_bottom mjArticleTools',
'aTools', 'tooltip',
'adSummary', 'nav-inline','insetFullBracket']},
dict({'class':re.compile('^articleTools_bottom')}),
dict(rel='shortcut icon')
]
remove_tags_after = [dict(id="article_story_body"), {'class':"article story"}]
def get_browser(self):
br = BasicNewsRecipe.get_browser()
return br
def preprocess_html(self,soup):
def decode_us_date(datestr):
udate = datestr.strip().lower().split()
m = ['january','february','march','april','may','june','july','august','september','october','november','december'].index(udate[0])+1
d = int(udate[1])
y = int(udate[2])
return date(y,m,d)
# check if article is paid content
if self.omit_paid_content:
divtags = soup.findAll('div','tooltip')
if divtags:
for divtag in divtags:
if divtag.find(text="Subscriber Content"):
return None
# check if article is too old
datetag = soup.find('li',attrs={'class' : re.compile("^dateStamp")})
if datetag:
dateline_string = self.tag_to_string(datetag,False)
date_items = dateline_string.split(',')
datestring = date_items[0]+date_items[1]
article_date = decode_us_date(datestring)
earliest_date = date.today() - timedelta(days=self.oldest_article)
if article_date < earliest_date:
self.log("Skipping article dated %s" % datestring)
return None
datetag.parent.extract()
# place dateline in article heading
bylinetag = soup.find('h3','byline')
if bylinetag:
h3bylinetag = bylinetag
else:
bylinetag = soup.find('li','byline')
if bylinetag:
h3bylinetag = bylinetag.h3
if not h3bylinetag:
h3bylinetag = bylinetag
bylinetag = bylinetag.parent
if bylinetag:
if h3bylinetag.a:
bylinetext = 'By '+self.tag_to_string(h3bylinetag.a,False)
else:
bylinetext = self.tag_to_string(h3bylinetag,False)
h3byline = Tag(soup,'h3',[('class','byline')])
if bylinetext.isspace() or (bylinetext == ''):
h3byline.insert(0,NavigableString(date_items[0]+','+date_items[1]))
else:
h3byline.insert(0,NavigableString(bylinetext+u'\u2014'+date_items[0]+','+date_items[1]))
bylinetag.replaceWith(h3byline)
else:
headlinetag = soup.find('div',attrs={'class' : re.compile("^articleHeadlineBox")})
if headlinetag:
dateline = Tag(soup,'h3', [('class','byline')])
dateline.insert(0,NavigableString(date_items[0]+','+date_items[1]))
headlinetag.insert(len(headlinetag),dateline)
else: # if no date tag, don't process this page--it's not a news item
return None
# This gets rid of the annoying superfluous bullet symbol preceding columnist bylines
ultag = soup.find('ul',attrs={'class' : 'cMetadata metadataType-articleCredits'})
if ultag:
a = ultag.h3
if a:
ultag.replaceWith(a)
return soup
def parse_index(self):
articles = {}
key = None
ans = []
def parse_index_page(page_name,page_title):
def article_title(tag):
atag = tag.find('h2') # title is usually in an h2 tag
if not atag: # if not, get text from the a tag
atag = tag.find('a',href=True)
if not atag:
return ''
t = self.tag_to_string(atag,False)
if t == '':
# sometimes the title is in the second a tag
atag.extract()
atag = tag.find('a',href=True)
if not atag:
return ''
return self.tag_to_string(atag,False)
return t
return self.tag_to_string(atag,False)
def article_author(tag):
atag = tag.find('strong') # author is usually in a strong tag
if not atag:
atag = tag.find('h4') # if not, look for an h4 tag
if not atag:
return ''
return self.tag_to_string(atag,False)
def article_summary(tag):
atag = tag.find('p')
if not atag:
return ''
subtag = atag.strong
if subtag:
subtag.extract()
return self.tag_to_string(atag,False)
def article_url(tag):
atag = tag.find('a',href=True)
if not atag:
return ''
url = re.sub(r'\?.*', '', atag['href'])
return url
def handle_section_name(tag):
# turns a tag into a section name with special processing
# for What's News, U.S., World & U.S. and World
s = self.tag_to_string(tag,False)
if ("What" in s) and ("News" in s):
s = "What's News"
elif (s == "U.S.") or (s == "World & U.S.") or (s == "World"):
s = s + " News"
return s
mainurl = 'http://online.wsj.com'
pageurl = mainurl+page_name
#self.log("Page url %s" % pageurl)
soup = self.index_to_soup(pageurl)
# Find each instance of div with class including "headlineSummary"
for divtag in soup.findAll('div',attrs={'class' : re.compile("^headlineSummary")}):
# divtag contains all article data as ul's and li's
# first, check if there is an h3 tag which provides a section name
stag = divtag.find('h3')
if stag:
if stag.parent.get('class', '') == 'dynamic':
# a carousel of articles is too complex to extract a section name
# for each article, so we'll just call the section "Carousel"
section_name = 'Carousel'
else:
section_name = handle_section_name(stag)
else:
section_name = "What's News"
#self.log("div Section %s" % section_name)
# find each top-level ul in the div
# we don't restrict to class = newsItem because the section_name
# sometimes changes via a ul tag inside the div
for ultag in divtag.findAll('ul',recursive=False):
stag = ultag.find('h3')
if stag:
if stag.parent.name == 'ul':
# section name has changed
section_name = handle_section_name(stag)
#self.log("ul Section %s" % section_name)
# delete the h3 tag so it doesn't get in the way
stag.extract()
# find each top level li in the ul
for litag in ultag.findAll('li',recursive=False):
stag = litag.find('h3')
if stag:
# section name has changed
section_name = handle_section_name(stag)
#self.log("li Section %s" % section_name)
# delete the h3 tag so it doesn't get in the way
stag.extract()
# if there is a ul tag inside the li it is superfluous;
# it is probably a list of related articles
utag = litag.find('ul')
if utag:
utag.extract()
# now skip paid subscriber articles if desired
subscriber_tag = litag.find(text="Subscriber Content")
if subscriber_tag:
if self.omit_paid_content:
continue
# delete the tip div so it doesn't get in the way
tiptag = litag.find("div", { "class" : "tipTargetBox" })
if tiptag:
tiptag.extract()
h1tag = litag.h1
# if there's an h1 tag, its parent is a div which should replace
# the li tag for the analysis
if h1tag:
litag = h1tag.parent
h5tag = litag.h5
if h5tag:
# section name has changed
section_name = self.tag_to_string(h5tag,False)
#self.log("h5 Section %s" % section_name)
# delete the h5 tag so it doesn't get in the way
h5tag.extract()
url = article_url(litag)
if url == '':
continue
if url.startswith("/article"):
url = mainurl+url
if not url.startswith("http://online.wsj.com"):
continue
if not url.endswith(".html"):
continue
if 'video' in url:
continue
title = article_title(litag)
if title == '':
continue
#self.log("URL %s" % url)
#self.log("Title %s" % title)
pubdate = ''
#self.log("Date %s" % pubdate)
author = article_author(litag)
if author == '':
author = section_name
elif author == section_name:
author = ''
else:
author = section_name+': '+author
#if not author == '':
# self.log("Author %s" % author)
description = article_summary(litag)
#if not description == '':
# self.log("Description %s" % description)
if not articles.has_key(page_title):
articles[page_title] = []
articles[page_title].append(dict(title=title,url=url,date=pubdate,description=description,author=author,content=''))
for page_name,page_title in self.sectionlist:
parse_index_page(page_name,page_title)
ans.append(page_title)
ans = [(key, articles[key]) for key in ans if articles.has_key(key)]
return ans

View File

@@ -0,0 +1,34 @@
from calibre.web.feeds.news import BasicNewsRecipe
import re
class YahooNews(BasicNewsRecipe):
title = 'Yahoo News'
__author__ = 'Starson17'
description = 'Yahoo-Science'
language = 'en'
use_embedded_content= False
no_stylesheets = True
linearize_tables = True
oldest_article = 24
remove_javascript = True
remove_empty_feeds = True
max_articles_per_feed = 10
feeds = [#There are dozens of other feeds at http://news.yahoo.com/rss
(u'Top Stories', u'http://rss.news.yahoo.com/rss/topstories'),
(u'Science', u'http://rss.news.yahoo.com/rss/science')
]
keep_only_tags = [dict(name='div', attrs={'id':'yn-story'})]
remove_tags = [dict(name='div', attrs={'class':['hd', 'ft', 'yn-share-social']}),
dict(name='div', attrs={'id':['yn-story-minor-media']})]
preprocess_regexps = [(re.compile(r'<span>Play Video</span>', re.DOTALL),lambda match: '<span></span>')]
extra_css = '''
h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
'''
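
The preprocess_regexps entry runs against the raw page HTML before parsing and blanks the 'Play Video' captions left over once player markup is removed. The same substitution in isolation, on a made-up snippet:

import re

pat = re.compile(r'<span>Play Video</span>', re.DOTALL)
html = '<div><span>Play Video</span><p>Story text</p></div>'
print(pat.sub('<span></span>', html))
# <div><span></span><p>Story text</p></div>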

View File

@@ -19,8 +19,8 @@ class ANDROID(USBMS):
VENDOR_ID = {
# HTC
0x0bb4 : { 0x0c02 : [0x100, 0x227], 0x0c01 : [0x100, 0x227], 0x0ff9
: [0x0100, 0x227]},
0x0bb4 : { 0x0c02 : [0x100, 0x0227], 0x0c01 : [0x100, 0x0227], 0x0ff9
: [0x0100, 0x0227, 0x0226]},
# Motorola
0x22b8 : { 0x41d9 : [0x216], 0x2d67 : [0x100], 0x41db : [0x216],
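
The VENDOR_ID table reads: USB vendor id, then product id, then the list of acceptable device revisions (BCD); the hunk above adds revision 0x0226 for HTC product 0x0ff9. A toy matcher showing how such a table is consulted; the function is hypothetical, not calibre API:

def device_matches(vendor, product, bcd, table):
    # vendor -> {product -> [acceptable BCD revisions]}
    return bcd in table.get(vendor, {}).get(product, [])

VENDOR_ID = {0x0bb4: {0x0ff9: [0x0100, 0x0227, 0x0226]}}
print(device_matches(0x0bb4, 0x0ff9, 0x0226, VENDOR_ID))  # True
print(device_matches(0x0bb4, 0x0ff9, 0x0225, VENDOR_ID))  # False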

View File

@@ -55,7 +55,7 @@ class WinPNPScanner(object):
def drive_order(self, pnp_id):
order = 0
match = re.search(r'REV_.*?&(\d+)', pnp_id)
match = re.search(r'REV_.*?&(\d+)#', pnp_id)
if match is not None:
order = int(match.group(1))
return order
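
The tightened pattern requires the digit group to be terminated by '#', so stray '&<digits>' fields elsewhere in a PNP id no longer set the drive order. A quick comparison on made-up PNP id strings:

import re

with_hash = r'USBSTOR\DISK&VEN_X&PROD_Y&REV_0100&1#ABCD'  # hypothetical id
no_hash = r'USBSTOR\DISK&VEN_X&PROD_Y&REV_0100&1&0'       # hypothetical id

for pat in (r'REV_.*?&(\d+)', r'REV_.*?&(\d+)#'):
    for pnp_id in (with_hash, no_hash):
        m = re.search(pat, pnp_id)
        print('%s on %s -> %s' % (pat, pnp_id,
              m.group(1) if m else 'no match, order stays 0'))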

View File

@@ -931,7 +931,7 @@ class SaveToDiskAction(object): # {{{
lpath = self.library_view.model().db.library_path.replace('/', os.sep)
if dpath.startswith(lpath):
return error_dialog(self, _('Not allowed'),
_('You are tying to save files into the calibre '
_('You are trying to save files into the calibre '
'library. This can cause corruption of your '
'library. Save to disk is meant to export '
'files from your calibre library elsewhere.'), show=True)

View File

@@ -31,7 +31,14 @@
</widget>
</item>
<item>
<widget class="QComboBox" name="input_formats"/>
<widget class="QComboBox" name="input_formats">
<property name="sizeAdjustPolicy">
<enum>QComboBox::AdjustToMinimumContentsLengthWithIcon</enum>
</property>
<property name="minimumContentsLength">
<number>5</number>
</property>
</widget>
</item>
<item>
<widget class="QCheckBox" name="opt_individual_saved_settings">
@@ -64,7 +71,14 @@
</widget>
</item>
<item>
<widget class="QComboBox" name="output_formats"/>
<widget class="QComboBox" name="output_formats">
<property name="sizeAdjustPolicy">
<enum>QComboBox::AdjustToMinimumContentsLengthWithIcon</enum>
</property>
<property name="minimumContentsLength">
<number>5</number>
</property>
</widget>
</item>
</layout>
</item>
@@ -115,8 +129,8 @@
<rect>
<x>0</x>
<y>0</y>
<width>810</width>
<height>489</height>
<width>805</width>
<height>484</height>
</rect>
</property>
<layout class="QVBoxLayout" name="verticalLayout_3">

View File

@@ -118,6 +118,7 @@ class DeviceManager(Thread): # {{{
self.jobs = Queue.Queue(0)
self.keep_going = True
self.job_manager = job_manager
self.reported_errors = set([])
self.current_job = None
self.scanner = DeviceScanner()
self.connected_device = None
@@ -141,13 +142,16 @@
for dev, detected_device in connected_devices:
if dev.OPEN_FEEDBACK_MESSAGE is not None:
self.open_feedback_slot(dev.OPEN_FEEDBACK_MESSAGE)
try:
dev.reset(detected_device=detected_device,
report_progress=self.report_progress)
try:
dev.open()
except:
tb = traceback.format_exc()
if DEBUG or tb not in self.reported_errors:
self.reported_errors.add(tb)
prints('Unable to open device', str(dev))
traceback.print_exc()
prints(tb)
continue
self.connected_device = dev
self.connected_device_kind = device_kind
@@ -192,10 +196,12 @@
if possibly_connected_devices:
if not self.do_connect(possibly_connected_devices,
device_kind='device'):
if DEBUG:
prints('Connect to device failed, retrying in 5 seconds...')
time.sleep(5)
if not self.do_connect(possibly_connected_devices,
device_kind='usb'):
if DEBUG:
prints('Device connect failed again, giving up')
# Mount devices that don't use USB, such as the folder device and iTunes
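
The device-open path above now deduplicates error reports: each distinct traceback is printed only once, tracked in the new reported_errors set (DEBUG overrides the suppression), and the connect-retry messages become DEBUG-only. A minimal sketch of the log-once-per-traceback idiom; the class and names are hypothetical:

import traceback

class Opener(object):
    def __init__(self):
        self.reported_errors = set()

    def try_open(self, dev):
        try:
            dev.open()
        except Exception:
            tb = traceback.format_exc()
            if tb not in self.reported_errors:
                self.reported_errors.add(tb)  # silence repeats of this error
                print('Unable to open device %s' % dev)
                print(tb)
            return False
        return True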

View File

@@ -79,7 +79,7 @@ class ChooseLibrary(QDialog, Ui_Dialog):
if not text:
return error_dialog(self, _('No location'), _('No location selected'),
show=True)
loc = os.path.abspath()
loc = os.path.abspath(text)
if not loc or not os.path.exists(loc) or not self.check_action(action,
loc):
return

View File

@@ -48,17 +48,42 @@ class EditAuthorsDialog(QDialog, Ui_EditAuthorsDialog):
select_item = sort
self.table.resizeColumnsToContents()
# set up the signal after the table is filled
# set up the cellChanged signal only after the table is filled
self.table.cellChanged.connect(self.cell_changed)
self.table.setSortingEnabled(True)
# set up sort buttons
self.sort_by_author.setCheckable(True)
self.sort_by_author.setChecked(False)
self.sort_by_author.clicked.connect(self.do_sort_by_author)
self.author_order = 1
self.table.sortByColumn(1, Qt.AscendingOrder)
self.sort_by_author_sort.clicked.connect(self.do_sort_by_author_sort)
self.sort_by_author_sort.setCheckable(True)
self.sort_by_author_sort.setChecked(True)
self.author_sort_order = 1
# set up author sort calc button
self.recalc_author_sort.clicked.connect(self.do_recalc_author_sort)
if select_item is not None:
self.table.setCurrentItem(select_item)
self.table.editItem(select_item)
else:
self.table.setCurrentCell(0, 0)
def do_sort_by_author(self):
self.author_order = 1 if self.author_order == 0 else 0
self.table.sortByColumn(0, self.author_order)
self.sort_by_author.setChecked(True)
self.sort_by_author_sort.setChecked(False)
def do_sort_by_author_sort(self):
self.author_sort_order = 1 if self.author_sort_order == 0 else 0
self.table.sortByColumn(1, self.author_sort_order)
self.sort_by_author.setChecked(False)
self.sort_by_author_sort.setChecked(True)
def accepted(self):
self.result = []
for row in range(0,self.table.rowCount()):
@@ -69,6 +94,17 @@ class EditAuthorsDialog(QDialog, Ui_EditAuthorsDialog):
if orig_aut != aut or orig_sort != sort:
self.result.append((id, orig_aut, aut, sort))
def do_recalc_author_sort(self):
self.table.cellChanged.disconnect()
for row in range(0,self.table.rowCount()):
item = self.table.item(row, 0)
aut = unicode(item.text()).strip()
c = self.table.item(row, 1)
# Sometimes trailing commas are left by changing between copy algs
c.setText(author_to_author_sort(aut).rstrip(','))
self.table.setFocus(Qt.OtherFocusReason)
self.table.cellChanged.connect(self.cell_changed)
def cell_changed(self, row, col):
if col == 0:
item = self.table.item(row, 0)
@@ -79,8 +115,4 @@ class EditAuthorsDialog(QDialog, Ui_EditAuthorsDialog):
else:
item = self.table.item(row, 1)
self.table.setCurrentItem(item)
# disable and reenable sorting to force the sort now, so we can scroll
# to the item after it moves
self.table.setSortingEnabled(False)
self.table.setSortingEnabled(True)
self.table.scrollToItem(item)
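
do_recalc_author_sort above uses a common Qt idiom: disconnect cellChanged before rewriting cells in bulk, then reconnect, so the handler does not fire once per programmatic update. The idiom in isolation (PyQt4-era signal API; names hypothetical):

def bulk_set_column(table, texts, on_cell_changed):
    table.cellChanged.disconnect()  # mute per-cell notifications
    try:
        for row, text in enumerate(texts):
            table.item(row, 1).setText(text)
    finally:
        # Restore interactive behaviour even if an update raised.
        table.cellChanged.connect(on_cell_changed)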

View File

@@ -6,7 +6,7 @@
<rect>
<x>0</x>
<y>0</y>
<width>730</width>
<width>768</width>
<height>342</height>
</rect>
</property>
@@ -33,8 +33,53 @@
</property>
</widget>
</item>
<item>
<layout class="QHBoxLayout" name="horizontalLayout">
<item>
<widget class="QPushButton" name="sort_by_author">
<property name="text">
<string>Sort by author</string>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="sort_by_author_sort">
<property name="text">
<string>Sort by author sort</string>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="recalc_author_sort">
<property name="toolTip">
<string>Reset all the author sort values to a value automatically generated from the author. Exactly how this value is automatically generated can be controlled via Preferences-&gt;Advanced-&gt;Tweaks</string>
</property>
<property name="text">
<string>Recalculate all author sort values</string>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_3">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QDialogButtonBox" name="buttonBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
@@ -42,11 +87,13 @@
<set>QDialogButtonBox::Cancel|QDialogButtonBox::Ok</set>
</property>
<property name="centerButtons">
<bool>true</bool>
<bool>false</bool>
</property>
</widget>
</item>
</layout>
</item>
</layout>
</widget>
<resources/>
<connections>

View File

@@ -675,9 +675,7 @@ class Wizard(QWizard):
self.connect(self.library_page, SIGNAL('retranslate()'),
self.retranslate)
self.finish_page = FinishPage()
bt = unicode(self.buttonText(self.FinishButton)).replace('&', '')
t = unicode(self.finish_page.finish_text.text())
self.finish_page.finish_text.setText(t%bt)
self.set_finish_text()
self.kindle_page = KindlePage()
self.stanza_page = StanzaPage()
self.word_player_page = WordPlayerPage()
@@ -702,6 +700,7 @@
for pid in self.pageIds():
page = self.page(pid)
page.retranslateUi(page)
self.set_finish_text()
def accept(self):
pages = map(self.page, self.visitedPages())
@@ -715,6 +714,13 @@
def completed(self, newloc):
return QWizard.accept(self)
def set_finish_text(self, *args):
bt = unicode(self.buttonText(self.FinishButton)).replace('&', '')
t = unicode(self.finish_page.finish_text.text())
if '%s' in t:
self.finish_page.finish_text.setText(t%bt)
def wizard(parent=None):
w = Wizard(parent)
return w
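
set_finish_text is now also called from retranslate: retranslateUi reloads the label, restoring its '%s' placeholder, and the "'%s' in t" guard keeps a repeated call from raising, since old-style formatting fails when no placeholder is left. A tiny illustration:

t = 'Click %s to finish'
bt = 'Finish'
if '%s' in t:
    t = t % bt  # 'Click Finish to finish'
# Without the guard, a second t % bt would raise
# TypeError: not all arguments converted during string formatting.
print(t)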

View File

@@ -542,6 +542,8 @@ class ResultCache(SearchQueryParser):
if field is not None:
self.sort(field, ascending)
self._map_filtered = list(self._map)
if self.search_restriction:
self.search('', return_matches=False, ignore_search_restriction=False)
def seriescmp(self, sidx, siidx, x, y, library_order=None):
try:

View File

@@ -15,6 +15,7 @@ from calibre import prepare_string_for_xml
# Hackish - ignoring sentences ending or beginning in numbers to avoid
# confusion with decimal points.
lost_cr_pat = re.compile('([a-z])([\.\?!])([A-Z])')
lost_cr_exception_pat = re.compile(r'(Ph\.D)|(D\.Phil)|((Dr|Mr|Mrs|Ms)\.[A-Z])')
def comments_to_html(comments):
'''
@@ -51,6 +52,8 @@ def comments_to_html(comments):
return '\n'.join(parts)
# Explode lost CRs to \n\n
comments = lost_cr_exception_pat.sub(lambda m: m.group().replace('.',
'.\r'), comments)
for lost_cr in lost_cr_pat.finditer(comments):
comments = comments.replace(lost_cr.group(),
'%s%s\n\n%s' % (lost_cr.group(1),
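
Taken together: the new exception pattern first breaks abbreviations such as Ph.D with a carriage return, so the lost-CR heuristic below it no longer mistakes them for sentence boundaries. Both patterns from the hunk above, run standalone on a sample comment of my own:

import re

lost_cr_pat = re.compile('([a-z])([\.\?!])([A-Z])')
lost_cr_exception_pat = re.compile(r'(Ph\.D)|(D\.Phil)|((Dr|Mr|Mrs|Ms)\.[A-Z])')

comments = 'He holds a Ph.D in physics.His thesis was short.'
comments = lost_cr_exception_pat.sub(
    lambda m: m.group().replace('.', '.\r'), comments)  # 'Ph.D' -> 'Ph.\rD'
for lost_cr in lost_cr_pat.finditer(comments):
    comments = comments.replace(lost_cr.group(), '%s%s\n\n%s' % (
        lost_cr.group(1), lost_cr.group(2), lost_cr.group(3)))
print(repr(comments))
# 'He holds a Ph.\rD in physics.\n\nHis thesis was short.'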

View File

@@ -317,6 +317,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'title', 'timestamp', 'uuid', 'pubdate'):
setattr(self, prop, functools.partial(get_property,
loc=self.FIELD_MAP['comments' if prop == 'comment' else prop]))
setattr(self, 'title_sort', functools.partial(get_property,
loc=self.FIELD_MAP['sort']))
def initialize_database(self):
metadata_sqlite = open(P('metadata_sqlite.sql'), 'rb').read()
@@ -494,6 +496,7 @@
mi.timestamp = self.timestamp(idx, index_is_id=index_is_id)
mi.pubdate = self.pubdate(idx, index_is_id=index_is_id)
mi.uuid = self.uuid(idx, index_is_id=index_is_id)
mi.title_sort = self.title_sort(idx, index_is_id=index_is_id)
tags = self.tags(idx, index_is_id=index_is_id)
if tags:
mi.tags = [i.strip() for i in tags.split(',')]
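
The new title_sort accessor follows the same pattern as the neighbouring properties: functools.partial binds a generic column getter to a FIELD_MAP index, here the 'sort' column. A toy version of the pattern with a made-up row cache:

import functools

FIELD_MAP = {'title': 0, 'sort': 1}        # illustrative subset
rows = {42: ('The Title', 'Title, The')}   # book id -> cached row

def get_property(idx, loc=-1):
    return rows[idx][loc]

title_sort = functools.partial(get_property, loc=FIELD_MAP['sort'])
print(title_sort(42))  # 'Title, The'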

View File

@@ -177,7 +177,7 @@ def extract(path, dir):
try:
if open_archive_data.OpenResult != 0:
raise UnRARException(_interpret_open_error(open_archive_data.OpenResult, path))
prints('Archive:', path)
#prints('Archive:', path)
#print get_archive_info(open_archive_data.Flags)
header_data = RARHeaderDataEx(CmtBuf=None)
#_libunrar.RARSetCallback(arc_data, callback_func, mode)

File diff suppressed because it is too large.