Mirror of https://github.com/kovidgoyal/calibre.git (synced 2025-07-07 10:14:46 -04:00)
Update Wired

This commit replaces the old magazine-index scrape with a paginated crawl of
http://www.wired.com/category/magazine/page/N, renames the recipe class from
Wired to WiredDailyNews, and refreshes the recipe metadata.

commit 8e85a16e4c (parent 59c865b79f)
@@ -1,107 +1,61 @@
 __license__ = 'GPL v3'
-__copyright__ = '2010-2013, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2014, Darko Miletic <darko.miletic at gmail.com>'
 '''
 www.wired.com
 '''
 
-import re
-from calibre import strftime
 from calibre.web.feeds.news import BasicNewsRecipe
 
-class Wired(BasicNewsRecipe):
-    title = 'Wired Magazine'
-    __author__ = 'Darko Miletic'
-    description = 'Gaming news'
-    publisher = 'Conde Nast Digital'
-    category = 'news, games, IT, gadgets'
-    oldest_article = 32
-    max_articles_per_feed = 100
+class WiredDailyNews(BasicNewsRecipe):
+    title = 'Wired Magazine, Monthly Edition'
+    __author__ = 'Darko Miletic, update by Zach Lapidus'
+    description = ('Wired is a full-color monthly American magazine, published in both print '
+                   'and online editions, that reports on how emerging technologies affect culture,'
+                   'the economy and politics.')
+    publisher = 'Conde Nast'
+    category = 'news, IT, computers, technology'
+    oldest_article = 2
+    max_articles_per_feed = 200
     no_stylesheets = True
     encoding = 'utf-8'
     use_embedded_content = False
-    masthead_url = 'http://www.wired.com/images/home/wired_logo.gif'
     language = 'en'
-    publication_type = 'magazine'
+    ignore_duplicate_articles = {'url'}
+    remove_empty_feeds = True
+    publication_type = 'newsportal'
     extra_css = """
-        h1, .entry-header{font-family: brandon-grotesque,anchor-web,Helvetica,Arial,sans-serif}
-        .entry-header{display: block;}
-        .entry-header ul{ list-style-type:disc;}
-        .author, .entryDate, .entryTime, .entryEdit, .entryCategories{display: inline}
-        .entry-header li{text-transform: uppercase;}
-        div#container{font-family: 'Exchange SSm 4r', Georgia, serif}
+        .entry-header{
+            text-transform: uppercase;
+            vertical-align: baseline;
+            display: inline;
+        }
     """
-    index = 'http://www.wired.com/magazine/'
-
-    preprocess_regexps = [(re.compile(r'<meta name="Title".*<title>', re.DOTALL|re.IGNORECASE),lambda match: '<title>')]
-    conversion_options = {
-        'comment'  : description,
-        'tags'     : category,
-        'publisher': publisher,
-        'language' : language,
-    }
-
-    keep_only_tags = [dict(name='div', attrs={'class':'post'})]
-    remove_tags_after = dict(name='div', attrs={'id':'container'})
     remove_tags = [
-        dict(name=['object','embed','iframe','link','meta','base']),
-        dict(name='div', attrs={'class':['social-top','podcast_storyboard','tweetmeme_button']}),
-        dict(attrs={'id':'ff_bottom_nav'}),
-        dict(name='a', attrs={'href':'http://www.wired.com/app'}),
-        dict(name='div', attrs={'id':'mag-bug'}),
+        dict(name=['meta','link']),
+        dict(name='div', attrs={'class':'podcast_storyboard'}),
+        dict(id=['sharing', 'social', 'article-tags', 'sidebar']),
     ]
-    remove_attributes = ['height','width','lang','border','clear']
+    keep_only_tags = [
+        dict(attrs={'data-js':['post', 'postHeader']}),
+    ]
 
+    def parse_wired_index_page(self, num, seen):
+        soup = self.index_to_soup('http://www.wired.com/category/magazine/page/%d' % num)
+        for a in soup.find('main').findAll('a', href=True):
+            url = a['href']
+            if url.startswith('http://www.wired.com/') and url.endswith('/'):
+                title = self.tag_to_string(a.find('h2'))
+                dateloc = a.find('time')
+                date = self.tag_to_string(dateloc)
+                if title.lower() != 'read more' and title and url not in seen:
+                    seen.add(url)
+                    self.log('Found article:', title, 'in page:', num)
+                    yield {'title':title, 'date':date, 'url':url, 'description':''}
+
     def parse_index(self):
-        totalfeeds = []
-        soup = self.index_to_soup(self.index)
-        majorf = soup.find('div', attrs={'class':'entry'})
-        if majorf:
-            articles = []
-            checker = []
-            for a in majorf.findAll('a', href=True):
-                if a['href'].startswith('http://www.wired.com/') and a['href'].endswith('/'):
-                    title = self.tag_to_string(a)
-                    url = a['href']
-                    if title.lower() != 'read more' and url not in checker:
-                        checker.append(url)
-                        articles.append({
-                            'title'      : title,
-                            'date'       : strftime(self.timefmt),
-                            'url'        : a['href'],
-                            'description': '',
-                        })
-            totalfeeds.append(('Articles', articles))
-        return totalfeeds
-
-    def get_cover_url(self):
-        cover_url = None
-        soup = self.index_to_soup(self.index)
-        cover_item = soup.find('div', attrs={'class':'spread-image'})
-        if cover_item:
-            cover_url = 'http://www.wired.com' + cover_item.a.img['src']
-        return cover_url
-
-    def print_version(self, url):
-        return url.rstrip('/') + '/all/1'
-
-    def preprocess_html(self, soup):
-        for item in soup.findAll(style=True):
-            del item['style']
-        for item in soup.findAll('a'):
-            if item.string is not None:
-                tstr = item.string
-                item.replaceWith(tstr)
-            else:
-                item.name = 'span'
-                for atrs in ['href','target','alt','title','name','id']:
-                    if item.has_key(atrs):
-                        del item[atrs]
-        for item in soup.findAll('img'):
-            if not item.has_key('alt'):
-                item['alt'] = 'image'
-            if item.has_key('data-lazy-src'):
-                item['src'] = item['data-lazy-src']
-                del item['data-lazy-src']
-        return soup
+        articles = []
+        seen = set()
+        for num in (1, 2):
+            articles.extend(self.parse_wired_index_page(num, seen))
+        return [('Articles', articles)]
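For readers adapting this pattern to their own recipes, here is a minimal standalone sketch of the index-scraping approach the new parse_wired_index_page() uses, written against plain BeautifulSoup rather than calibre's recipe machinery. The sample HTML and the helper name parse_index_page are made up for illustration; the real recipe fetches each category page with self.index_to_soup().

# Sketch of the pattern: collect anchors inside <main>, keep only canonical
# Wired article URLs, de-duplicate via a shared `seen` set, and yield
# calibre-style article dicts.
from bs4 import BeautifulSoup

SAMPLE_PAGE = """
<main>
  <a href="http://www.wired.com/2014/05/some-story/">
    <h2>Some Story</h2><time>05.12.14</time>
  </a>
  <a href="http://www.wired.com/2014/05/some-story/">
    <h2>Read More</h2><time>05.12.14</time>
  </a>
</main>
"""

def parse_index_page(html, num, seen):
    soup = BeautifulSoup(html, 'html.parser')
    for a in soup.find('main').find_all('a', href=True):
        url = a['href']
        # Only keep canonical article links on wired.com.
        if url.startswith('http://www.wired.com/') and url.endswith('/'):
            h2 = a.find('h2')
            title = h2.get_text(strip=True) if h2 else ''
            time_tag = a.find('time')
            date = time_tag.get_text(strip=True) if time_tag else ''
            # Skip "read more" teasers and URLs already seen on earlier pages.
            if title and title.lower() != 'read more' and url not in seen:
                seen.add(url)
                yield {'title': title, 'date': date, 'url': url, 'description': ''}

seen = set()
articles = []
for num in (1, 2):  # the recipe walks index pages 1 and 2 the same way
    articles.extend(parse_index_page(SAMPLE_PAGE, num, seen))
print(articles)  # one entry: the teaser/duplicate links are filtered out

The updated recipe itself can be exercised end to end with calibre's built-in recipe test mode, e.g. ebook-convert wired.recipe .epub --test -vv.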