Mirror of https://github.com/kovidgoyal/calibre.git, synced 2025-07-09 03:04:10 -04:00
Use the new Chromium-based scraper for the Kobo books plugin
This seems like a more robust workaround for Kobo's Akamai-based blocking, which fingerprints the network protocol, than spoofing Python user agents.
parent 4a0b742d2d
commit 4e72452688
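The heart of the change is the read_url() helper added in the second hunk below: rather than spoofing a random 'python/...' user agent on calibre's mechanize-based browser(), the plugin now lazily creates a single calibre.scraper.simple.Overseer and fetches every page through it, so requests go out over calibre's Chromium network stack, which Akamai's protocol-level fingerprinting accepts. What follows is a minimal standalone sketch of that pattern, not the plugin itself; it assumes a calibre >= 5.40.1 environment (e.g. run with calibre-debug) where these modules are importable, and the query URL in the usage block is only an illustration.

# Minimal sketch of the lazily-initialized, Chromium-backed fetcher this
# commit introduces (names mirror the diff; the __main__ block is only an
# example). Assumes calibre >= 5.40.1, run via calibre-debug.
from calibre.ebooks.chardet import strip_encoding_declarations

scraper = None  # one shared Overseer per process; starting it is not cheap


def read_url(url):
    global scraper
    if scraper is None:
        from calibre.scraper.simple import Overseer
        scraper = Overseer()  # fetches pages through calibre's Chromium stack
    # fetch_url() returns the page source as text; drop any stale encoding
    # declaration so lxml will accept the already-decoded unicode string.
    return strip_encoding_declarations(scraper.fetch_url(url))


if __name__ == '__main__':
    from lxml import html
    # Example query URL in the same form the plugin builds with quote_plus().
    doc = html.fromstring(read_url('https://www.kobobooks.com/search/search.html?q=dickens'))
    print(doc.xpath('//title/text()'))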
@@ -1,22 +1,21 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import, division, print_function, unicode_literals
 
-store_version = 9  # Needed for dynamic plugin loading
+store_version = 10  # Needed for dynamic plugin loading
 
 __license__ = 'GPL 3'
 __copyright__ = '2011, John Schember <john@nachtimwald.com>'
 __docformat__ = 'restructuredtext en'
 
-from contextlib import closing
 try:
     from urllib.parse import quote_plus
 except ImportError:
     from urllib import quote_plus
 
-from lxml import html, etree
-import string, random
+from lxml import etree, html
 
-from calibre import browser, url_slash_cleaner
+from calibre import url_slash_cleaner
+from calibre.ebooks.chardet import strip_encoding_declarations
 from calibre.ebooks.metadata import authors_to_string
 from calibre.gui2 import open_url
 from calibre.gui2.store import StorePlugin
@@ -24,29 +23,25 @@ from calibre.gui2.store.basic_config import BasicStoreConfig
 from calibre.gui2.store.search_result import SearchResult
 from calibre.gui2.store.web_store_dialog import WebStoreDialog
 
+scraper = None
+
 
-def get_browser():
-    # kobo books uses akamai which for some reason times out sending responses
-    # when the UA is a real browser UA. It currently seems to work fine with a
-    # UA of the form python/word.
-    word_len = random.randint(3, 8)
-    word = ''.join(random.choice(string.ascii_lowercase) for i in range(word_len))
-    major_ver = random.randint(1, 3)
-    minor_ver = random.randint(0, 12)
-    br = browser(user_agent='python/{}-{}.{}'.format(word, major_ver, minor_ver), verify_ssl_certificates=False)
-    return br
+def read_url(url):
+    # Kobo uses Akamai which has some bot detection that uses network/tls
+    # protocol data. So use the Chromium network stack to make the request
+    global scraper
+    if scraper is None:
+        from calibre.scraper.simple import Overseer
+        scraper = Overseer()
+    return strip_encoding_declarations(scraper.fetch_url(url))
 
 
 def search_kobo(query, max_results=10, timeout=60, write_html_to=None):
     from css_selectors import Select
     url = 'https://www.kobobooks.com/search/search.html?q=' + quote_plus(query)
-
-    br = get_browser()
-
-    with closing(br.open(url, timeout=timeout)) as f:
-        raw = f.read()
-        if write_html_to is not None:
-            with open(write_html_to, 'wb') as f:
-                f.write(raw)
-        doc = html.fromstring(raw)
-        select = Select(doc)
+    raw = read_url(url)
+    if write_html_to is not None:
+        with open(write_html_to, 'w') as f:
+            f.write(raw)
+    doc = html.fromstring(raw)
+    select = Select(doc)
@@ -101,7 +96,7 @@ def search_kobo(query, max_results=10, timeout=60, write_html_to=None):
 
 class KoboStore(BasicStoreConfig, StorePlugin):
 
-    minimum_calibre_version = (2, 21, 0)
+    minimum_calibre_version = (5, 40, 1)
 
     def open(self, parent=None, detail_item=None, external=False):
         pub_id = '0dsO3kDu/AU'
@@ -127,10 +122,8 @@ class KoboStore(BasicStoreConfig, StorePlugin):
             yield result
 
     def get_details(self, search_result, timeout):
-        br = get_browser()
-        with closing(br.open(search_result.detail_item, timeout=timeout)) as nf:
-            idata = html.fromstring(nf.read())
-            search_result.author = ', '.join(idata.xpath('.//h2[contains(@class, "author")]//a/text()'))
-            if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "Download options")])'):
-                if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "DRM-Free")])'):
-                    search_result.drm = SearchResult.DRM_UNLOCKED
+        raw = read_url(search_result.detail_item)
+        idata = html.fromstring(raw)
+        if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "Download options")])'):
+            if idata.xpath('boolean(//div[@class="bookitem-secondary-metadata"]//li[contains(text(), "DRM-Free")])'):
+                search_result.drm = SearchResult.DRM_UNLOCKED