Kovid Goyal 2024-07-07 10:48:51 +05:30
commit eebbbf4782
7 changed files with 316 additions and 5 deletions


@@ -0,0 +1,102 @@
'''
https://ancientegyptmagazine.com
'''
from calibre import browser
from calibre.web.feeds.news import BasicNewsRecipe


class ancientegypt(BasicNewsRecipe):
    title = 'The Past: Ancient Egypt Magazine'
    language = 'en'
    __author__ = 'unkn0wn'
    description = (
        'Ancient Egypt is the world\'s leading Egyptology magazine, exploring the history, people and culture of the Nile Valley. '
        'Now in a larger format with a fresh new design, AE brings you the latest news and discoveries, and feature articles covering '
        'more than 5000 years of Egyptian history. Published bimonthly.'
    )
    no_stylesheets = True
    use_embedded_content = False
    remove_attributes = ['style', 'height', 'width']
    ignore_duplicate_articles = {'url'}
    resolve_internal_links = True
    masthead_url = 'https://ancientegyptmagazine.com/media/website/ae-logo-2.png'
    simultaneous_downloads = 1

    extra_css = '''
        [class^="meta"] { font-size:small; }
        .post-subtitle { font-style: italic; color:#202020; }
        .wp-block-image { font-size:small; text-align:center; }
    '''

    keep_only_tags = [
        dict(attrs={'class':lambda x: x and '__header' in x}),
        dict(attrs={'class':lambda x: x and '__background' in x}),
        dict(attrs={'class':lambda x: x and '__body_area' in x}),
    ]

    remove_tags = [
        dict(attrs={'class':'ad-break'}),
        dict(attrs={'class':lambda x: x and 'avatar' in x.split()}),
        dict(attrs={'class':lambda x: x and '--share' in x})
    ]

    def preprocess_html(self, soup):
        exp = soup.find(attrs={'class':lambda x: x and 'post-subtitle' in x.split()})
        if exp:
            exp.name = 'p'
        return soup

    def parse_index(self):
        soup = self.index_to_soup('https://the-past.com/category/magazines/ae/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']

        # for past editions, add url
        # url = ''

        issue = self.index_to_soup(url)
        ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
        if ti:
            self.title = self.tag_to_string(ti).strip()
        dt = soup.find(attrs={'class':lambda x: x and '__date' in x})
        if dt:
            self.timefmt = ' [' + self.tag_to_string(dt).strip() + ']'
        edit = issue.find('h2', attrs={'id':'from-the-editor'})
        if edit and edit.findParent('div'):
            self.description = self.tag_to_string(edit.findParent('div'))
        cov = issue.find('figure', attrs={'class':lambda x: x and 'wp-block-image' in x.split()})
        if cov:
            self.cover_url = cov.img['src']

        div = issue.find('div', attrs={'class':lambda x: x and 'entry-content' in x.split()})

        feeds = []

        h2 = div.findAll('h2', attrs={'class':lambda x: x and 'wp-block-heading' in x.split()})
        lt = div.findAll(attrs={'class':'display-posts-listing'})
        for x, y in zip(h2, lt):
            section = self.tag_to_string(x).strip()
            self.log(section)
            articles = []
            for a in y.findAll('a', href=True, attrs={'class':'title'}):
                url = a['href']
                title = self.tag_to_string(a).strip()
                desc = ''
                exp = a.findNext(attrs={'class':'excerpt'})
                if exp:
                    desc = self.tag_to_string(exp).strip()
                self.log('\t', title, '\n\t', desc, '\n\t\t', url)
                articles.append({'title': title, 'description':desc, 'url': url})
            if articles:
                feeds.append((section, articles))
        return feeds

    def get_browser(self, *args, **kwargs):
        return self

    def clone_browser(self, *args, **kwargs):
        return self.get_browser()

    def open_novisit(self, *args, **kwargs):
        br = browser()
        return br.open_novisit(*args, **kwargs)

    open = open_novisit
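Note on back issues: the commented-out url = '' line in parse_index is the hook the recipe's own comment points to for past editions. A minimal sketch of that edit, assuming an issue page address from the-past.com is pasted in place of the placeholder (everything after index_to_soup(url) stays unchanged):

    def parse_index(self):
        soup = self.index_to_soup('https://the-past.com/category/magazines/ae/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']
        # for past editions, add url
        url = 'https://the-past.com/...'  # placeholder: paste the back-issue page URL here
        issue = self.index_to_soup(url)
        # ... the rest of parse_index proceeds unchanged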

Binary file added (not shown): 491 B
Binary file added (not shown): 380 B
Binary file added (not shown): 382 B


@@ -5,7 +5,7 @@ from calibre import browser
 from calibre.web.feeds.news import BasicNewsRecipe
 class milthist(BasicNewsRecipe):
-    title = 'Military History Matters'
+    title = 'The Past: Military History Matters'
     language = 'en'
     __author__ = 'unkn0wn'
     description = (
@@ -53,14 +53,18 @@ class milthist(BasicNewsRecipe):
         # url = ''
         issue = self.index_to_soup(url)
         ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
         if ti:
             self.title = self.tag_to_string(ti).strip()
+        dt = soup.find(attrs={'class':lambda x: x and '__date' in x})
+        if dt:
+            self.timefmt = ' [' + self.tag_to_string(dt).strip() + ']'
         edit = issue.find('h2', attrs={'id':'from-the-editor'})
-        if edit.findNext('p'):
-            self.description = self.tag_to_string(edit.findNext('p'))
-        if edit.findPrevious('figure'):
-            self.cover_url = edit.findPrevious('figure').img['src']
+        if edit and edit.findParent('div'):
+            self.description = self.tag_to_string(edit.findParent('div'))
+        cov = issue.find('figure', attrs={'class':lambda x: x and 'wp-block-image' in x.split()})
+        if cov:
+            self.cover_url = cov.img['src']
         div = issue.find('div', attrs={'class':lambda x: x and 'entry-content' in x.split()})
         feeds = []
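This hunk brings the recipe in line with the three new ones: the description now comes from the parent div of the "From the editor" heading rather than the single paragraph after it, and the cover is taken from the first wp-block-image figure instead of the nearest preceding figure. A rough standalone sketch of why the description change matters, using bs4 directly and an invented snippet of issue-page HTML (the real pages are fetched via index_to_soup):

    from bs4 import BeautifulSoup

    html = '''
    <div>
      <h2 id="from-the-editor">From the editor</h2>
      <p>First paragraph of the editor's letter.</p>
      <p>Second paragraph, which findNext('p') never reaches.</p>
    </div>
    '''
    soup = BeautifulSoup(html, 'html.parser')
    edit = soup.find('h2', attrs={'id':'from-the-editor'})

    old_desc = edit.findNext('p').get_text()      # old lookup: first paragraph only
    new_desc = edit.findParent('div').get_text()  # new lookup: the whole editor's block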


@@ -0,0 +1,102 @@
'''
https://minervamagazine.com/
'''
from calibre import browser
from calibre.web.feeds.news import BasicNewsRecipe


class minerva(BasicNewsRecipe):
    title = 'The Past: Minerva Magazine'
    language = 'en'
    __author__ = 'unkn0wn'
    description = (
        'Minerva, Archaeology and Ancient art, was a bi-monthly magazine publishing features on exhibitions, excavations, '
        'and museums, interviews, news items, and book reviews. It ran from 1990 to 2023. So use this recipe to fetch past issues.'
    )
    no_stylesheets = True
    use_embedded_content = False
    remove_attributes = ['style', 'height', 'width']
    ignore_duplicate_articles = {'url'}
    resolve_internal_links = True
    masthead_url = 'https://minervamagazine.com/media/website/Minerva-mastheadandstrap-black_323x80.png'
    simultaneous_downloads = 1

    extra_css = '''
        [class^="meta"] { font-size:small; }
        .post-subtitle { font-style: italic; color:#202020; }
        .wp-block-image { font-size:small; text-align:center; }
    '''

    keep_only_tags = [
        dict(attrs={'class':lambda x: x and '__header' in x}),
        dict(attrs={'class':lambda x: x and '__background' in x}),
        dict(attrs={'class':lambda x: x and '__body_area' in x}),
    ]

    remove_tags = [
        dict(attrs={'class':'ad-break'}),
        dict(attrs={'class':lambda x: x and 'avatar' in x.split()}),
        dict(attrs={'class':lambda x: x and '--share' in x})
    ]

    def preprocess_html(self, soup):
        exp = soup.find(attrs={'class':lambda x: x and 'post-subtitle' in x.split()})
        if exp:
            exp.name = 'p'
        return soup

    def parse_index(self):
        soup = self.index_to_soup('https://the-past.com/category/magazines/minerva/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']

        # for past editions, add url
        # url = ''

        issue = self.index_to_soup(url)
        ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
        if ti:
            self.title = self.tag_to_string(ti).strip()
        dt = soup.find(attrs={'class':lambda x: x and '__date' in x})
        if dt:
            self.timefmt = ' [' + self.tag_to_string(dt).strip() + ']'
        edit = issue.find('h2', attrs={'id':'from-the-editor'})
        if edit and edit.findParent('div'):
            self.description = self.tag_to_string(edit.findParent('div'))
        cov = issue.find('figure', attrs={'class':lambda x: x and 'wp-block-image' in x.split()})
        if cov:
            self.cover_url = cov.img['src']

        div = issue.find('div', attrs={'class':lambda x: x and 'entry-content' in x.split()})

        feeds = []

        h2 = div.findAll('h2', attrs={'class':lambda x: x and 'wp-block-heading' in x.split()})
        lt = div.findAll(attrs={'class':'display-posts-listing'})
        for x, y in zip(h2, lt):
            section = self.tag_to_string(x).strip()
            self.log(section)
            articles = []
            for a in y.findAll('a', href=True, attrs={'class':'title'}):
                url = a['href']
                title = self.tag_to_string(a).strip()
                desc = ''
                exp = a.findNext(attrs={'class':'excerpt'})
                if exp:
                    desc = self.tag_to_string(exp).strip()
                self.log('\t', title, '\n\t', desc, '\n\t\t', url)
                articles.append({'title': title, 'description':desc, 'url': url})
            if articles:
                feeds.append((section, articles))
        return feeds

    def get_browser(self, *args, **kwargs):
        return self

    def clone_browser(self, *args, **kwargs):
        return self.get_browser()

    def open_novisit(self, *args, **kwargs):
        br = browser()
        return br.open_novisit(*args, **kwargs)

    open = open_novisit
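All three new recipes build their section list the same way: inside entry-content, each wp-block-heading h2 is paired positionally with the display-posts-listing block that follows it, so zip(h2, lt) yields one (section, articles) pair per heading. A small sketch of that pairing against invented HTML that mirrors the structure the recipes assume (class names are taken from the code above; the markup is otherwise hypothetical):

    from bs4 import BeautifulSoup

    html = '''
    <div class="entry-content">
      <h2 class="wp-block-heading">Features</h2>
      <ul class="display-posts-listing">
        <li><a class="title" href="https://the-past.com/a1">Article one</a>
            <span class="excerpt">Summary one.</span></li>
      </ul>
      <h2 class="wp-block-heading">News</h2>
      <ul class="display-posts-listing">
        <li><a class="title" href="https://the-past.com/a2">Article two</a>
            <span class="excerpt">Summary two.</span></li>
      </ul>
    </div>
    '''
    div = BeautifulSoup(html, 'html.parser').find('div', attrs={'class':'entry-content'})
    h2 = div.findAll('h2', attrs={'class':lambda x: x and 'wp-block-heading' in x.split()})
    lt = div.findAll(attrs={'class':'display-posts-listing'})
    for heading, listing in zip(h2, lt):
        print(heading.get_text(strip=True))                        # section name
        for a in listing.findAll('a', href=True, attrs={'class':'title'}):
            print('  ', a.get_text(strip=True), '->', a['href'])   # article title and URL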


@@ -0,0 +1,103 @@
'''
https://www.world-archaeology.com
'''
from calibre import browser
from calibre.web.feeds.news import BasicNewsRecipe


class worldarch(BasicNewsRecipe):
    title = 'The Past: Current World Archaeology'
    language = 'en'
    __author__ = 'unkn0wn'
    description = (
        'Travel the globe with Current World Archaeology, the magazine that brings you up-to-date with the latest archaeological discoveries. '
        'Explore sites and sights through our exclusive features and eye-popping photography. We bring you the stories from the '
        'archaeologists themselves, so you learn first-hand from the experts about the latest finds and most up-to-date research. '
        'Published six times a year.'
    )
    no_stylesheets = True
    use_embedded_content = False
    remove_attributes = ['style', 'height', 'width']
    ignore_duplicate_articles = {'url'}
    resolve_internal_links = True
    masthead_url = 'https://i0.wp.com/www.world-archaeology.com/wp-content/uploads/2016/02/cwa-logo.png'
    simultaneous_downloads = 1

    extra_css = '''
        [class^="meta"] { font-size:small; }
        .post-subtitle { font-style: italic; color:#202020; }
        .wp-block-image { font-size:small; text-align:center; }
    '''

    keep_only_tags = [
        dict(attrs={'class':lambda x: x and '__header' in x}),
        dict(attrs={'class':lambda x: x and '__background' in x}),
        dict(attrs={'class':lambda x: x and '__body_area' in x}),
    ]

    remove_tags = [
        dict(attrs={'class':'ad-break'}),
        dict(attrs={'class':lambda x: x and 'avatar' in x.split()}),
        dict(attrs={'class':lambda x: x and '--share' in x})
    ]

    def preprocess_html(self, soup):
        exp = soup.find(attrs={'class':lambda x: x and 'post-subtitle' in x.split()})
        if exp:
            exp.name = 'p'
        return soup

    def parse_index(self):
        soup = self.index_to_soup('https://the-past.com/category/magazines/cwa/')
        art = soup.find('article', attrs={'class':lambda x: x and 'tag-magazines' in x.split()})
        url = art.h2.a['href']

        # for past editions, add url
        # url = ''

        issue = self.index_to_soup(url)
        ti = issue.find('h1', attrs={'class':lambda x: x and 'post-title' in x.split()})
        if ti:
            self.title = self.tag_to_string(ti).strip()
        dt = soup.find(attrs={'class':lambda x: x and '__date' in x})
        if dt:
            self.timefmt = ' [' + self.tag_to_string(dt).strip() + ']'
        edit = issue.find('h2', attrs={'id':'from-the-editor'})
        if edit and edit.findParent('div'):
            self.description = self.tag_to_string(edit.findParent('div'))
        cov = issue.find('figure', attrs={'class':lambda x: x and 'wp-block-image' in x.split()})
        if cov:
            self.cover_url = cov.img['src']

        div = issue.find('div', attrs={'class':lambda x: x and 'entry-content' in x.split()})

        feeds = []

        h2 = div.findAll('h2', attrs={'class':lambda x: x and 'wp-block-heading' in x.split()})
        lt = div.findAll(attrs={'class':'display-posts-listing'})
        for x, y in zip(h2, lt):
            section = self.tag_to_string(x).strip()
            self.log(section)
            articles = []
            for a in y.findAll('a', href=True, attrs={'class':'title'}):
                url = a['href']
                title = self.tag_to_string(a).strip()
                desc = ''
                exp = a.findNext(attrs={'class':'excerpt'})
                if exp:
                    desc = self.tag_to_string(exp).strip()
                self.log('\t', title, '\n\t', desc, '\n\t\t', url)
                articles.append({'title': title, 'description':desc, 'url': url})
            if articles:
                feeds.append((section, articles))
        return feeds

    def get_browser(self, *args, **kwargs):
        return self

    def clone_browser(self, *args, **kwargs):
        return self.get_browser()

    def open_novisit(self, *args, **kwargs):
        br = browser()
        return br.open_novisit(*args, **kwargs)

    open = open_novisit