Update Boston Globe

Kovid Goyal, 2020-09-06 11:23:22 +05:30
parent f777b3c5c5
commit 6054525b63
GPG Key ID: 06BC317B515ACE7C (no known key found for this signature in database)

@@ -1,7 +1,4 @@
 import re
 from calibre.web.feeds.recipes import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import Tag, NavigableString
-from datetime import date, timedelta
-
 
 def classes(classes):
@@ -10,13 +7,6 @@ def classes(classes):
         'class': lambda x: x and frozenset(x.split()).intersection(q)})
 
 
-def new_tag(soup, name, attrs=()):
-    impl = getattr(soup, 'new_tag', None)
-    if impl is not None:
-        return impl(name, attrs=dict(attrs))
-    return Tag(soup, name, attrs=attrs or None)
-
-
 def class_as_string(x):
     if isinstance(x, (list, tuple)):
         x = ' '.join(x)
@@ -36,18 +26,38 @@ def class_startswith(*prefixes):
     return dict(attrs={'class': q})
 
 
+# From: https://www3.bostonglobe.com/lifestyle/comics?arc404=true
+comics_to_fetch = {
+    "ADAM@HOME": 'ad',
+    "ARLO & JANIS": 'aj',
+    # "CUL DE SAC": 'cds',
+    # "CURTIS": 'kfcrt',
+    "DILBERT": 'dt',
+    "DOONESBURY": 'db',
+    "DUSTIN": 'kfdus',
+    "F MINUS": 'fm',
+    "FOR BETTER OR WORSE": 'fb',
+    # "GET FUZZY": 'gz',
+    # "MOTHER GOOSE & GRIMM": 'tmmgg',
+    # "JUMPSTART": 'jt',
+    "MONTY": 'mt',
+    # "POOCH CAFE",
+    "RHYMES WITH ORANGE": 'kfrwo',
+    # "ROSE IS ROSE": 'rr',
+    # "ZIPPY THE PINHEAD": 'kfzpy',
+    "ZITS": 'kfzt'
+}
 class BostonGlobeSubscription(BasicNewsRecipe):
-    title = "Boston Globe Subscription"
-    __author__ = 'Rob Freundlich'
-    description = 'Boston Globe with full articles for subscribers'
+    title = "Boston Globe"
+    __author__ = 'Kovid Goyal'
+    description = 'The Boston Globe'
     language = 'en'
-    INDEX = 'https://www3.bostonglobe.com/todayspaper/%Y/%m/%d?arc404=true'
-    todaysDate = date.today().strftime("%d/%m/%Y")
     timefmt = ' [%a, %d %b, %Y]'
 
     keep_only_tags = [
-        class_startswith('headline |', 'subheader |', 'byline |', 'image |', 'lead |', 'body |'),
+        class_startswith('headline |', 'subheader |', 'byline |', 'image |', 'lead |', 'body |', 'comic-debug'),
+        classes('comic article__title methode__story article-header__headline lead-media figure article-header__byline article-content'),
     ]
 
     remove_tags = [
         classes('inline-newsletter ad skip-nav article-footer sharebar arc_ad'),
@@ -58,33 +68,6 @@ class BostonGlobeSubscription(BasicNewsRecipe):
     remove_attributes = ['style']
     no_stylesheets = True
     # simultaneous_downloads = 1
-    comics_to_fetch = {
-        "ADAM@HOME",
-        "ARLO & JANIS",
-        # "ASK SHAGG",
-        # "CUL DE SAC",
-        # "CURTIS",
-        "DILBERT",
-        "DOONESBURY",
-        "DUSTIN",
-        # "THE FAMILY CIRCUS",
-        "F MINUS",
-        "FOR BETTER OR WORSE",
-        "FOXTROT",
-        # "GET FUZZY",
-        # "MOTHER GOOSE & GRIMM",
-        # "IN THE STICKS",
-        # "JUMPSTART",
-        "MONTY",
-        "NON SEQUITUR",
-        "PICKLES",
-        # "POOCH CAFE",
-        "RHYMES WITH ORANGE",
-        # "ROSE IS ROSE",
-        "STONE SOUP",
-        # "ZIPPY THE PINHEAD",
-        "ZITS"
-    }
 
     def image_url_processor(self, baseurl, url):
         return self.absolutize_url(url)
@@ -97,162 +80,48 @@ class BostonGlobeSubscription(BasicNewsRecipe):
         return url
 
     def parse_index(self):
-        # self.logger.setLevel(logging.WARNING)
         feeds = []
-        try:
-            index = date.today().strftime(self.INDEX)
-            self.log("Getting today's paper from ", index)
-            soup = self.index_to_soup(index)
-        except Exception:
-            self.todaysDate = (date.today() - timedelta(days=1))
-            index = self.todaysDate.strftime(self.INDEX)
-            self.log("Getting today's paper from ", index)
-            soup = self.index_to_soup(index)
+        soup = self.index_to_soup('https://www.bostonglobe.com/todays-paper/')
+        # soup = self.index_to_soup('file:///t/raw.html')
+        section = None
+        articles = []
 
-        def get_top_stories():
-            self.log("Getting Top Stories")
-            articles = []
-            topStoriesDiv = soup.find("div", {"class": "stories-top"})
-            stories = topStoriesDiv.findAll("div", {"class": lambda x: x and 'story' in x.split()})
-            for story in stories:
-                h2 = story.find("h2", {"class": 'story-title'})
-                link = story.find("a", {'class': 'story-perm'})
-                if h2 is not None and link is not None:
-                    for img in h2.findAll('img'):
-                        img.extract()
-                    title = self.tag_to_string(h2)
-                    url = self.absolutize_url(link["href"])
-                    excerpt_div = story.find("div", {"class": "excerpt"})
-                    excerpt = self.tag_to_string(excerpt_div)
-                    self.log('\t', title, '[%s]' % url)
-                    self.log('\t\t', excerpt)
-                    articles.append({"title": title, "url": self.absolutize_url(
-                        url), "date": self.todaysDate, "description": excerpt})
-            if articles:
-                feeds.append(("Top Stories", articles))
-
-        def get_section(sectionDiv):
-            sectionHeader = sectionDiv.find("h2", "hed-section")
-            articles = []
-            feedTitle = self.tag_to_string(sectionHeader)
-            self.log("Getting", feedTitle)
-            excerpts = sectionDiv.findAll("div", "sec-excerpt")
-            for excerpt in excerpts:
-                # Stories here follow similar forms to top-stories (above)
-                storyTitle = excerpt.find("h3", "story-title")
-                if (storyTitle.parent.name == "a"):
-                    a = storyTitle.parent
-                    url = a["href"]
-                    title = self.tag_to_string(storyTitle)
-                else:
-                    a = storyTitle.find("a")
-                    url = a["href"]
-                    title = self.tag_to_string(a)
-
-                hedCat = excerpt.find("p", "hed-cat")
-                if (hedCat):
-                    category = self.tag_to_string(hedCat)
-
-                author = ''
-                authorHeader = excerpt.find("h4", "author")
-                if (authorHeader):
-                    author = self.tag_to_string(authorHeader)
-
-                if (category != "") & (category != " "):
-                    title = category + ": " + title
-
-                description = ""
-                for para in excerpt.findAll("p"):
-                    if (para != hedCat):
-                        description += self.tag_to_string(para)
-                self.log('\t', title, '[%s]' % self.absolutize_url(url))
-                if description:
-                    self.log('\t\t', description)
-                articles.append({"title": title, "url": self.absolutize_url(
-                    url), "author": author, "date": self.todaysDate, "description": description})
-            if articles:
-                feeds.append((feedTitle, articles))
-
-        def get_comics():
-            articles = []
-            comicSoup = self.index_to_soup(
-                "https://www.bostonglobe.com/lifestyle/comics")
-            for personIndex in comicSoup.findAll("ol", {"class": re.compile("person-index.*")}):
-                for li in personIndex.findAll("li"):
-                    title = self.tag_to_string(li.p)
-                    if (title in self.comics_to_fetch):
-                        url = li.a["href"]
-                        author = self.tag_to_string(li.h2)
-                        # comicPageSoup =
-                        # self.index_to_soup(self.absolutize_url(url))
-                        # imageURL = comicPageSoup.findAll("a", "comic")
-                        # if len(imageURL) > 0:
-                        #     url = imageURL[0]["href"]
-                        #     print "COMIC %s: %s" % (title, url)
-                        articles.append({"title": title, "url": self.absolutize_url(
-                            url), "author": author, "date": self.todaysDate, "description": ""})
-            feeds.append(("Comics", articles))
-
-        get_top_stories()
-
-        for div in soup.findAll('div', {'class': 'tod-paper-section'}):
-            get_section(div)
-
-        get_comics()
+        for h in soup.findAll(['h2', 'h4']):
+            if h.name == 'h4':
+                if section and articles:
+                    feeds.append((section, articles))
+                section = self.tag_to_string(h)
+                articles = []
+                if section.lower().startswith('jump'):
+                    section = None
+                else:
+                    self.log(section)
+                continue
+            if not section:
+                continue
+            title = self.tag_to_string(h)
+            a = h.findParent('a', href=True)
+            url = self.absolutize_url(a['href'])
+            desc = ''
+            q = h.findNextSibling('div', **classes('deck'))
+            if q is not None:
+                desc = self.tag_to_string(q)
+            articles.append({'title': title, 'url': url, 'description': desc})
+            self.log('\t', title, url)
 
+        if section and articles:
+            feeds.append((section, articles))
+
+        articles = []
+        for title, slug in comics_to_fetch.items():
+            articles.append({'title': title, 'url': 'https://www.bostonglobe.com/games-comics/comics/{}/'.format(slug)})
+        if articles:
+            feeds.append(('Comics', articles))
         return feeds
 
-    def postprocess_comics(self, soup, first):
-        main = soup.find("div", id="main")
-        sectionHead = main.find("div", "section-head")
-        title = sectionHead.h2
-        byline = sectionHead.h3
-        imgLink = main.find("a", "comic")
-        img = imgLink.img
-
-        body = new_tag(soup, "body")
-        body.insert(0, title)
-        body.insert(1, byline)
-        body.insert(2, img)
-
-        soup.body.replaceWith(body)
-        return soup
+    def preprocess_raw_html(self, raw, *a):
+        # open('/t/raw.html', 'wb').write(raw)
+        # The article content is present as JSON in one of the script tags
+        # but I can't be bothered extracting it. News organizations need their
+        # heads examined
+        raw = re.sub(r'<script.+?</script>', '', raw, flags=re.DOTALL)
+        raw = re.sub(r'<svg.+?</svg>', '', raw, flags=re.DOTALL)
+        return raw
 
     def preprocess_html(self, soup):
-        body = soup.find('body')
-        title = soup.find('title')
-        t = type('')(title.contents[0]).partition('-')[0].strip()
-        del title.contents[0]
-        title.contents.append(NavigableString(t))
-        title.name = 'h1'
-        body.insert(0, title)
-        images = soup.findAll("img")
-        for img in images:
-            fs = img.get('data-fullsrc')
+        for img in soup.findAll('img'):
+            fs = img.get('data-src')
             if fs:
-                img['src'] = fs
-            src = img.get('src')
-            if src:
-                img['src'] = self.absolutize_url(src)
-        return soup
-
-    def postprocess_html(self, soup, first):
-        comicsBody = soup.find(
-            "body", {"class": re.compile(".*section-comics.*")})
-        if comicsBody:
-            return self.postprocess_comics(soup, first)
+                remainder = fs.split('=')[-1].split('0')[-1]
+                img['src'] = 'https:/' + remainder
         return soup
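
A note for readers of the diff: the rewritten parse_index leans on the recipe's existing classes() helper (h.findNextSibling('div', **classes('deck'))), which simply builds a keyword dict for BeautifulSoup's find API that matches any tag whose class attribute shares at least one name with the query. A minimal standalone sketch of the same idea, using plain bs4 rather than calibre's bundled soup (the sample HTML is invented):

from bs4 import BeautifulSoup


def classes(classes):
    # Same helper as in the recipe: match any tag whose class
    # attribute intersects the requested set of class names.
    q = frozenset(classes.split(' '))
    return dict(attrs={
        'class': lambda x: x and frozenset(x.split()).intersection(q)})


html = '<div class="deck extra">Teaser</div><div class="body">Story</div>'
soup = BeautifulSoup(html, 'html.parser')
print(soup.find('div', **classes('deck')))  # -> <div class="deck extra">Teaser</div>

The "x and" guard matters because tags without a class attribute hand the lambda None.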
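
For anyone adapting the rewrite, the contract parse_index must satisfy is unchanged from the old version: return a list of (section_title, articles) tuples, where each article is a dict with at least 'title' and 'url' ('description', 'date' and 'author' are optional). A minimal sketch with placeholder values rather than real Globe markup:

from calibre.web.feeds.recipes import BasicNewsRecipe


class MinimalIndexExample(BasicNewsRecipe):
    # Illustrates the data shape parse_index must return; the
    # section name, headline and URL here are placeholders.
    title = 'Minimal Index Example'

    def parse_index(self):
        articles = [{
            'title': 'Example headline',         # shown in the book's table of contents
            'url': 'https://example.com/story',  # page calibre will download
            'description': 'Optional teaser text',
        }]
        return [('Example Section', articles)]

A change like this one is typically smoke-tested with calibre's documented recipe workflow, e.g. ebook-convert BostonGlobe.recipe out.epub --test -vv, which limits the download to two articles from each of the first two feeds and logs what parse_index produced.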