Use the new Chromium-based scraper for the Kobo books plugin

This seems like a more robust workaround for Kobo's Akamai-based blocking,
which analyzes network protocol fingerprints, than spoofing python-style
user agents.
Kovid Goyal 2022-04-02 12:28:42 +05:30
parent 4a0b742d2d
commit 4e72452688
No known key found for this signature in database
GPG Key ID: 06BC317B515ACE7C
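
For context, the change replaces calibre's mechanize-based browser() (with its
randomized python/word-x.y user agent trick) by the Overseer scraper, which
makes the request through calibre's Chromium-based network stack so the
TLS/HTTP fingerprint matches a real browser. A minimal sketch of the new fetch
path, assuming a calibre >= 5.40.1 environment where calibre.scraper.simple is
importable; the search query is illustrative:

    from calibre.ebooks.chardet import strip_encoding_declarations
    from calibre.scraper.simple import Overseer

    # Overseer drives a Chromium worker process behind the scenes
    scraper = Overseer()
    # fetch_url() returns the page source as unicode text; strip any
    # <meta charset> declarations so lxml accepts the already-decoded string
    raw = strip_encoding_declarations(scraper.fetch_url(
        'https://www.kobobooks.com/search/search.html?q=dickens'))
    print(raw[:200])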


@@ -1,22 +1,21 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import, division, print_function, unicode_literals
 
-store_version = 9  # Needed for dynamic plugin loading
+store_version = 10  # Needed for dynamic plugin loading
 
 __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'
 
-from contextlib import closing
 try:
     from urllib.parse import quote_plus
 except ImportError:
     from urllib import quote_plus
 
-from lxml import html, etree
-import string, random
+from lxml import etree, html
 
-from calibre import browser, url_slash_cleaner
+from calibre import url_slash_cleaner
+from calibre.ebooks.chardet import strip_encoding_declarations
 from calibre.ebooks.metadata import authors_to_string
 from calibre.gui2 import open_url
 from calibre.gui2.store import StorePlugin
@@ -24,84 +23,80 @@ from calibre.gui2.store.basic_config import BasicStoreConfig
 from calibre.gui2.store.search_result import SearchResult
 from calibre.gui2.store.web_store_dialog import WebStoreDialog
 
+scraper = None
+
 
-def get_browser():
-    # kobo books uses akamai which for some reason times out sending responses
-    # when the UA is a real browser UA. It currently seems to work fine with a
-    # UA of the form python/word.
-    word_len = random.randint(3, 8)
-    word = ''.join(random.choice(string.ascii_lowercase) for i in range(word_len))
-    major_ver = random.randint(1, 3)
-    minor_ver = random.randint(0, 12)
-    br = browser(user_agent='python/{}-{}.{}'.format(word, major_ver, minor_ver), verify_ssl_certificates=False)
-    return br
+def read_url(url):
+    # Kobo uses Akamai which has some bot detection that uses network/tls
+    # protocol data. So use the Chromium network stack to make the request
+    global scraper
+    if scraper is None:
+        from calibre.scraper.simple import Overseer
+        scraper = Overseer()
+    return strip_encoding_declarations(scraper.fetch_url(url))
 
 
 def search_kobo(query, max_results=10, timeout=60, write_html_to=None):
     from css_selectors import Select
     url = 'https://www.kobobooks.com/search/search.html?q=' + quote_plus(query)
-
-    br = get_browser()
-
-    with closing(br.open(url, timeout=timeout)) as f:
-        raw = f.read()
-        if write_html_to is not None:
-            with open(write_html_to, 'wb') as f:
-                f.write(raw)
-        doc = html.fromstring(raw)
-        select = Select(doc)
-        for i, item in enumerate(select('.result-items .item-wrapper.book')):
-            if i == max_results:
-                break
-            for img in select('.item-image img[src]', item):
-                cover_url = img.get('src')
-                if cover_url.startswith('//'):
-                    cover_url = 'https:' + cover_url
-                break
-            else:
-                cover_url = None
+    raw = read_url(url)
+    if write_html_to is not None:
+        with open(write_html_to, 'w') as f:
+            f.write(raw)
+    doc = html.fromstring(raw)
+    select = Select(doc)
+    for i, item in enumerate(select('.result-items .item-wrapper.book')):
+        if i == max_results:
+            break
+        for img in select('.item-image img[src]', item):
+            cover_url = img.get('src')
+            if cover_url.startswith('//'):
+                cover_url = 'https:' + cover_url
+            break
+        else:
+            cover_url = None
 
-            for p in select('h2.title', item):
-                title = etree.tostring(p, method='text', encoding='unicode').strip()
-                for a in select('a[href]', p):
-                    url = a.get('href')
-                    break
-                else:
-                    url = None
-                break
-            else:
-                title = None
-            if title:
-                for p in select('p.subtitle', item):
-                    title += ' - ' + etree.tostring(p, method='text', encoding='unicode').strip()
+        for p in select('h2.title', item):
+            title = etree.tostring(p, method='text', encoding='unicode').strip()
+            for a in select('a[href]', p):
+                url = a.get('href')
+                break
+            else:
+                url = None
+            break
+        else:
+            title = None
+        if title:
+            for p in select('p.subtitle', item):
+                title += ' - ' + etree.tostring(p, method='text', encoding='unicode').strip()
 
-            authors = []
-            for a in select('.contributors a.contributor-name', item):
-                authors.append(etree.tostring(a, method='text', encoding='unicode').strip())
-            authors = authors_to_string(authors)
+        authors = []
+        for a in select('.contributors a.contributor-name', item):
+            authors.append(etree.tostring(a, method='text', encoding='unicode').strip())
+        authors = authors_to_string(authors)
 
-            for p in select('p.price', item):
-                price = etree.tostring(p, method='text', encoding='unicode').strip()
-                break
-            else:
-                price = None
+        for p in select('p.price', item):
+            price = etree.tostring(p, method='text', encoding='unicode').strip()
+            break
+        else:
+            price = None
 
-            if title and authors and url:
-                s = SearchResult()
-                s.cover_url = cover_url
-                s.title = title
-                s.author = authors
-                s.price = price
-                s.detail_item = url
-                s.formats = 'EPUB'
-                s.drm = SearchResult.DRM_UNKNOWN
-                yield s
+        if title and authors and url:
+            s = SearchResult()
+            s.cover_url = cover_url
+            s.title = title
+            s.author = authors
+            s.price = price
+            s.detail_item = url
+            s.formats = 'EPUB'
+            s.drm = SearchResult.DRM_UNKNOWN
+            yield s
 
 
 class KoboStore(BasicStoreConfig, StorePlugin):
 
-    minimum_calibre_version = (2, 21, 0)
+    minimum_calibre_version = (5, 40, 1)
 
     def open(self, parent=None, detail_item=None, external=False):
         pub_id = '0dsO3kDu/AU'
@@ -127,17 +122,15 @@ class KoboStore(BasicStoreConfig, StorePlugin):
                 yield result
 
     def get_details(self, search_result, timeout):
-        br = get_browser()
-        with closing(br.open(search_result.detail_item, timeout=timeout)) as nf:
-            idata = html.fromstring(nf.read())
-            search_result.author = ', '.join(idata.xpath('.//h2[contains(@class, "author")]//a/text()'))
-            if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "Download options")])'):
-                if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "DRM-Free")])'):
-                    search_result.drm = SearchResult.DRM_UNLOCKED
-                if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "Adobe DRM")])'):
-                    search_result.drm = SearchResult.DRM_LOCKED
-            else:
-                search_result.drm = SearchResult.DRM_UNKNOWN
+        raw = read_url(search_result.detail_item)
+        idata = html.fromstring(raw)
+        if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "Download options")])'):
+            if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "DRM-Free")])'):
+                search_result.drm = SearchResult.DRM_UNLOCKED
+            if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "Adobe DRM")])'):
+                search_result.drm = SearchResult.DRM_LOCKED
+        else:
+            search_result.drm = SearchResult.DRM_UNKNOWN
         return True
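
Downstream of the fetch, nothing changed in shape: search_kobo() is still a
generator that parses the page with css_selectors and yields SearchResult
objects, so the plugin can be smoke-tested outside the GUI. A hedged usage
sketch (the module path is assumed from the store plugin layout and may
differ):

    from calibre.gui2.store.stores.kobo_plugin import search_kobo

    # write_html_to dumps the fetched page, which helps debug selector
    # breakage when Kobo changes its markup; note search_kobo now opens it
    # in 'w' (text) mode to match the unicode string read_url() returns
    for result in search_kobo('hard sf', max_results=3,
                              write_html_to='/tmp/kobo.html'):
        print(result.title, '|', result.author, '|', result.price)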