From 37771022cec803e861250d376dbcec8e9a7728d6 Mon Sep 17 00:00:00 2001
From: un-pogaz <46523284+un-pogaz@users.noreply.github.com>
Date: Fri, 24 Jan 2025 11:14:14 +0100
Subject: [PATCH] uniform string quote (auto-fix)
ruff 'Q'
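
The change was produced by running ruff's flake8-quotes ('Q') rules with --fix over the
tree, using the settings in ruff-strict-pep8.toml. A minimal sketch of the relevant
configuration and invocation follows; the exact keys in ruff-strict-pep8.toml are
assumptions for illustration, not reproduced from this patch:

    # flake8-quotes: prefer single quotes everywhere (assumed settings)
    [lint]
    select = ["Q"]

    [lint.flake8-quotes]
    inline-quotes = "single"
    multiline-quotes = "single"
    docstring-quotes = "single"

    # apply the auto-fix (hypothetical invocation)
    #   ruff check --config ruff-strict-pep8.toml --select Q --fix .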
---
manual/custom.py | 4 +-
manual/plugin_examples/interface_demo/main.py | 2 +-
recipes/1843.recipe | 38 +-
recipes/20_minutos.recipe | 4 +-
recipes/DrawAndCook.recipe | 2 +-
recipes/TheMITPressReader.recipe | 6 +-
recipes/abc_es.recipe | 4 +-
recipes/acrimed.recipe | 4 +-
recipes/adventuregamers.recipe | 4 +-
recipes/afr.recipe | 2 +-
recipes/afrique_21.recipe | 6 +-
recipes/al_jazeera.recipe | 4 +-
recipes/al_monitor.recipe | 6 +-
recipes/albert_mohler.recipe | 4 +-
recipes/ald.recipe | 8 +-
recipes/alternatives_economiques.recipe | 2 +-
recipes/am730.recipe | 2 +-
recipes/ambito.recipe | 4 +-
recipes/american_thinker.recipe | 2 +-
recipes/anandtech.recipe | 2 +-
recipes/ancient_egypt.recipe | 2 +-
recipes/andhrajyothy_ap.recipe | 2 +-
recipes/andhrajyothy_tel.recipe | 2 +-
recipes/arcamax.recipe | 18 +-
recipes/arret_sur_images.recipe | 12 +-
recipes/asahi_shimbun_en.recipe | 148 +-
recipes/asianreviewofbooks.recipe | 4 +-
recipes/ba_herald.recipe | 4 +-
recipes/bangkokpost.recipe | 2 +-
recipes/barrons.recipe | 8 +-
recipes/bbc.recipe | 32 +-
recipes/bbc_brasil.recipe | 12 +-
recipes/billorielly.recipe | 2 +-
recipes/blesk.recipe | 4 +-
recipes/blic.recipe | 4 +-
recipes/bloomberg-business-week.recipe | 2 +-
recipes/bookforummagazine.recipe | 56 +-
recipes/borsen_dk.recipe | 6 +-
recipes/boston.com.recipe | 28 +-
recipes/boston_globe_print_edition.recipe | 14 +-
recipes/brewiarz.recipe | 40 +-
recipes/business_insider.recipe | 4 +-
recipes/business_standard_print.recipe | 2 +-
recipes/business_today.recipe | 2 +-
recipes/cacm.recipe | 18 +-
recipes/calcalist.recipe | 48 +-
recipes/calgary_herald.recipe | 30 +-
recipes/capital_gr.recipe | 2 +-
recipes/caravan_magazine.recipe | 6 +-
recipes/cato.recipe | 4 +-
recipes/chr_mon.recipe | 4 +-
recipes/chronicle_higher_ed.recipe | 10 +-
recipes/cicero.recipe | 30 +-
recipes/cincinnati_enquirer.recipe | 2 +-
recipes/ciperchile.recipe | 4 +-
recipes/clarin.recipe | 4 +-
recipes/cnetjapan.recipe | 16 +-
recipes/cnetjapan_digital.recipe | 16 +-
recipes/cnetjapan_release.recipe | 14 +-
recipes/cnetnews.recipe | 2 +-
recipes/cnn.recipe | 2 +-
recipes/contretemps.recipe | 6 +-
recipes/cosmos.recipe | 6 +-
recipes/courrierinternational.recipe | 8 +-
recipes/cubadebate.recipe | 4 +-
recipes/dainik_bhaskar.recipe | 2 +-
recipes/danas.recipe | 6 +-
recipes/degentenaar.recipe | 4 +-
recipes/democracy_journal.recipe | 4 +-
recipes/demorgen_be.recipe | 10 +-
recipes/denik.cz.recipe | 4 +-
recipes/denikn.cz.recipe | 8 +-
recipes/deredactie.recipe | 10 +-
recipes/dilema.recipe | 4 +-
recipes/distrowatch_weekly.recipe | 30 +-
recipes/dnevnik_cro.recipe | 6 +-
recipes/donga.recipe | 40 +-
recipes/dr_dk.recipe | 12 +-
recipes/dzieje_pl.recipe | 16 +-
recipes/dziennik_pl.recipe | 2 +-
recipes/dziennik_polski.recipe | 2 +-
recipes/economist.recipe | 48 +-
recipes/economist_espresso.recipe | 10 +-
recipes/economist_free.recipe | 48 +-
recipes/economist_news.recipe | 14 +-
recipes/economist_search.recipe | 8 +-
recipes/economist_world_ahead.recipe | 26 +-
recipes/edmonton_journal.recipe | 30 +-
recipes/el_colombiano.recipe | 4 +-
recipes/el_cultural.recipe | 4 +-
recipes/el_diplo.recipe | 94 +-
recipes/el_pais.recipe | 4 +-
recipes/el_pais_babelia.recipe | 2 +-
recipes/elcohetealaluna.recipe | 4 +-
recipes/elcronista-arg.recipe | 4 +-
recipes/elektroda_pl.recipe | 2 +-
recipes/elmundo.recipe | 12 +-
recipes/elperiodico_spanish.recipe | 2 +-
recipes/en_globes_co_il.recipe | 28 +-
recipes/endgadget.recipe | 4 +-
recipes/equestria_daily.recipe | 22 +-
recipes/expansion_spanish.recipe | 10 +-
recipes/fastcompany.recipe | 4 +-
recipes/faz_net.recipe | 22 +-
recipes/financial_times.recipe | 2 +-
recipes/financialsense.recipe | 4 +-
recipes/first_things.recipe | 2 +-
recipes/flickr.recipe | 4 +-
recipes/flickr_es.recipe | 4 +-
recipes/fokus.recipe | 6 +-
recipes/folha.recipe | 4 +-
recipes/folhadesaopaulo_sub.recipe | 6 +-
recipes/foreign_policy.recipe | 2 +-
recipes/foreignaffairs.recipe | 22 +-
recipes/foxnews.recipe | 4 +-
recipes/free_inquiry.recipe | 4 +-
recipes/frontline.recipe | 2 +-
recipes/galaxys_edge.recipe | 12 +-
recipes/gazeta-prawna-calibre-v1.recipe | 10 +-
recipes/globes_co_il.recipe | 34 +-
recipes/go_comics.recipe | 46 +-
recipes/google_news.recipe | 2 +-
recipes/gosc_full.recipe | 2 +-
recipes/granta.recipe | 6 +-
recipes/grantland.recipe | 4 +-
recipes/greensboro_news_and_record.recipe | 2 +-
recipes/guardian.recipe | 8 +-
recipes/haaretz_en.recipe | 4 +-
recipes/hankyoreh21.recipe | 2 +-
recipes/harpers.recipe | 6 +-
recipes/hbr.recipe | 128 +-
recipes/heise.recipe | 2 +-
recipes/heise_ct.recipe | 2 +-
recipes/heise_ix.recipe | 2 +-
recipes/hindu.recipe | 4 +-
recipes/hindustan_times_print.recipe | 2 +-
recipes/history_today.recipe | 8 +-
recipes/hoy.recipe | 2 +-
recipes/hurriyet.recipe | 4 +-
recipes/idnes.recipe | 2 +-
recipes/ieee_spectrum_mag.recipe | 14 +-
recipes/il_messaggero.recipe | 6 +-
recipes/il_post.recipe | 44 +-
recipes/ilsole24ore.recipe | 4 +-
recipes/inc.recipe | 2 +-
recipes/independent_australia.recipe | 2 +-
recipes/india_today.recipe | 2 +-
recipes/indian_express.recipe | 6 +-
recipes/ing_dk.recipe | 6 +-
recipes/instapaper.recipe | 2 +-
recipes/internazionale.recipe | 6 +-
recipes/iol_za.recipe | 4 +-
recipes/iprofesional.recipe | 4 +-
recipes/jacobinmag.recipe | 4 +-
recipes/japan_times.recipe | 66 +-
recipes/javalobby.recipe | 6 +-
recipes/jijinews.recipe | 4 +-
recipes/kirkusreviews.recipe | 118 +-
recipes/kopalniawiedzy.recipe | 2 +-
recipes/korben.recipe | 2 +-
recipes/kudy_z_nudy.recipe | 4 +-
recipes/la_jornada.recipe | 6 +-
recipes/la_republica.recipe | 8 +-
recipes/lalibre_be.recipe | 2 +-
recipes/lanacion.recipe | 6 +-
recipes/lapoliticaonline_ar.recipe | 6 +-
recipes/laprensa.recipe | 52 +-
recipes/le_canard_enchaine.recipe | 8 +-
recipes/le_gorafi.recipe | 4 +-
recipes/le_monde_diplomatique_fr.recipe | 4 +-
recipes/le_monde_sub_paper.recipe | 54 +-
recipes/le_peuple_breton.recipe | 2 +-
recipes/leggo_it.recipe | 6 +-
recipes/lemonde_dip.recipe | 4 +-
recipes/lepoint.recipe | 2 +-
recipes/lexpress.recipe | 4 +-
recipes/liberation.recipe | 4 +-
recipes/libertad_digital.recipe | 4 +-
recipes/livemint.recipe | 4 +-
recipes/livescience.recipe | 4 +-
recipes/lwn_free.recipe | 8 +-
recipes/lwn_weekly.recipe | 4 +-
recipes/mainichi.recipe | 14 +-
recipes/mainichi_en.recipe | 20 +-
recipes/mainichi_science_news.recipe | 4 +-
recipes/marca.recipe | 4 +-
recipes/marctv.recipe | 2 +-
recipes/mediaindonesia.recipe | 2 +-
recipes/mediapart.recipe | 2 +-
recipes/merco_press.recipe | 2 +-
recipes/mit_technology_review.recipe | 10 +-
recipes/mmc_rtv.recipe | 2 +-
recipes/modoros.recipe | 2 +-
recipes/montreal_gazette.recipe | 30 +-
recipes/nacional_cro.recipe | 6 +-
recipes/natgeo.recipe | 4 +-
recipes/natgeo_kids.recipe | 4 +-
recipes/natgeo_traveller.recipe | 4 +-
recipes/natgeohis.recipe | 4 +-
recipes/natgeomag.recipe | 4 +-
recipes/nature.recipe | 16 +-
recipes/nautilus.recipe | 8 +-
recipes/new_scientist.recipe | 2 +-
recipes/new_scientist_mag.recipe | 2 +-
recipes/new_statesman.recipe | 4 +-
recipes/new_yorker.recipe | 8 +-
recipes/newrepublicmag.recipe | 178 +--
recipes/news24.recipe | 6 +-
recipes/news_busters.recipe | 2 +-
recipes/newsweek_polska.recipe | 12 +-
recipes/nezavisne_novine.recipe | 4 +-
recipes/nikkei_news.recipe | 34 +-
recipes/nikkeiasia.recipe | 4 +-
recipes/njuz_net.recipe | 4 +-
recipes/novilist_novine_hr.recipe | 4 +-
recipes/novosti.recipe | 4 +-
recipes/nrc.nl.recipe | 36 +-
recipes/nrc_next.recipe | 2 +-
recipes/nspm.recipe | 4 +-
recipes/nspm_int.recipe | 4 +-
recipes/nyt_magazine.recipe | 4 +-
recipes/nyt_tmag.recipe | 4 +-
recipes/nytfeeds.recipe | 4 +-
recipes/nytimes.recipe | 4 +-
recipes/nytimes_sports.recipe | 4 +-
recipes/nytimes_sub.recipe | 4 +-
recipes/nytimes_tech.recipe | 4 +-
recipes/nytimesbook.recipe | 4 +-
recipes/observatorul_cultural.recipe | 2 +-
recipes/observer_gb.recipe | 14 +-
recipes/oc_register.recipe | 24 +-
recipes/omgubuntu.recipe | 4 +-
recipes/orient_21.recipe | 6 +-
recipes/ottawa_citizen.recipe | 30 +-
recipes/outlook_india.recipe | 2 +-
recipes/pagina12.recipe | 6 +-
recipes/parool.recipe | 20 +-
recipes/pecat.recipe | 4 +-
recipes/people_daily.recipe | 8 +-
recipes/pescanik.recipe | 4 +-
recipes/politiko_dk.recipe | 8 +-
recipes/portafolio.recipe | 4 +-
recipes/pravda_por.recipe | 4 +-
recipes/presse_portal.recipe | 2 +-
recipes/private_eye.recipe | 20 +-
recipes/pro_physik.recipe | 2 +-
recipes/prospectmaguk_free.recipe | 96 +-
recipes/psych.recipe | 8 +-
recipes/quanta_magazine.recipe | 12 +-
recipes/queueacmorg.recipe | 8 +-
recipes/readitlater.recipe | 20 +-
recipes/real_clear.recipe | 86 +-
recipes/regina_leader_post.recipe | 20 +-
recipes/respekt_magazine.recipe | 40 +-
recipes/reuters.recipe | 4 +-
recipes/revista22.recipe | 2 +-
recipes/revista_veintitres.recipe | 6 +-
recipes/rts.recipe | 4 +-
recipes/russiafeed.recipe | 4 +-
recipes/rzeczpospolita.recipe | 6 +-
recipes/saskatoon_star_phoenix.recipe | 20 +-
recipes/science_news.recipe | 16 +-
recipes/scientific_american.recipe | 76 +-
recipes/scmp.recipe | 52 +-
recipes/scprint.recipe | 8 +-
recipes/screen_rant.recipe | 2 +-
recipes/seminar_magazine.recipe | 2 +-
recipes/sign_of_the_times.recipe | 4 +-
recipes/singtaohk.recipe | 2 +-
recipes/skeptical_enquirer.recipe | 4 +-
recipes/smh.recipe | 64 +-
recipes/sol_haber.recipe | 2 +-
recipes/star_gazetesi.recipe | 56 +-
recipes/strange_horizons.recipe | 4 +-
recipes/taz_rss.recipe | 2 +-
recipes/telepolis.recipe | 2 +-
recipes/thairath.recipe | 2 +-
recipes/the_diplomat.recipe | 4 +-
recipes/the_federalist.recipe | 4 +-
recipes/the_nation.recipe | 6 +-
recipes/the_philippine_daily_inquirer.recipe | 2 +-
recipes/the_saturday_paper.recipe | 4 +-
recipes/the_week_magazine_free.recipe | 2 +-
recipes/the_week_uk.recipe | 2 +-
recipes/theecocolapse.recipe | 4 +-
recipes/theeconomictimes_india.recipe | 2 +-
...heeconomictimes_india_print_edition.recipe | 6 +-
recipes/thenewcriterion.recipe | 6 +-
recipes/theoldie.recipe | 34 +-
recipes/tijd.recipe | 8 +-
recipes/toi.recipe | 4 +-
recipes/toiprint.recipe | 2 +-
recipes/tyzden.recipe | 4 +-
recipes/ugeskriftet.recipe | 4 +-
recipes/uncrate.recipe | 4 +-
recipes/unian_net_en.recipe | 2 +-
recipes/vancouver_province.recipe | 30 +-
recipes/vancouver_sun.recipe | 30 +-
recipes/variety.recipe | 4 +-
recipes/vecernji_list.recipe | 6 +-
recipes/vic_times.recipe | 24 +-
recipes/villagevoice.recipe | 10 +-
recipes/volksrant.recipe | 14 +-
recipes/vreme.recipe | 4 +-
recipes/windows_star.recipe | 4 +-
recipes/windsor_star.recipe | 20 +-
recipes/wired.recipe | 6 +-
recipes/wired_daily.recipe | 4 +-
recipes/words_without_borders.recipe | 4 +-
recipes/wsj.recipe | 2 +-
recipes/wsj_free.recipe | 4 +-
recipes/wsj_news.recipe | 2 +-
recipes/yomiuri_world.recipe | 4 +-
recipes/zaobao.recipe | 2 +-
recipes/zdnet.fr.recipe | 2 +-
recipes/zeitde_sub.recipe | 10 +-
recipes/zerodeux.recipe | 2 +-
recipes/zycie_warszawy.recipe | 2 +-
resources/default_tweaks.py | 2 +-
ruff-strict-pep8.toml | 8 +-
setup/build.py | 10 +-
setup/hosting.py | 2 +-
setup/install.py | 2 +-
setup/installers.py | 2 +-
setup/plugins_mirror.py | 2 +-
setup/publish.py | 4 +-
setup/upload.py | 4 +-
setup/vcvars.py | 34 +-
setup/wincross.py | 16 +-
src/calibre/__init__.py | 8 +-
src/calibre/constants.py | 2 +-
src/calibre/customize/__init__.py | 8 +-
src/calibre/customize/builtins.py | 6 +-
src/calibre/customize/zipplugin.py | 4 +-
src/calibre/db/backend.py | 6 +-
src/calibre/db/cache.py | 2 +-
src/calibre/db/cli/cmd_catalog.py | 14 +-
src/calibre/db/cli/cmd_check_library.py | 12 +-
src/calibre/db/cli/cmd_list.py | 2 +-
src/calibre/db/cli/cmd_list_categories.py | 4 +-
src/calibre/db/cli/tests.py | 16 +-
src/calibre/db/fts/connect.py | 2 +-
src/calibre/db/lazy.py | 4 +-
src/calibre/db/locking.py | 6 +-
src/calibre/db/notes/connect.py | 2 +-
src/calibre/db/schema_upgrades.py | 4 +-
src/calibre/db/tests/fts.py | 40 +-
src/calibre/db/tests/legacy.py | 4 +-
src/calibre/db/tests/locking.py | 2 +-
src/calibre/db/tests/reading.py | 8 +-
src/calibre/db/tests/writing.py | 14 +-
src/calibre/debug.py | 2 +-
src/calibre/devices/__init__.py | 2 +-
src/calibre/devices/android/driver.py | 4 +-
src/calibre/devices/cli.py | 152 +-
src/calibre/devices/cybook/t2b.py | 4 +-
src/calibre/devices/cybook/t4b.py | 2 +-
src/calibre/devices/eb600/driver.py | 2 +-
src/calibre/devices/errors.py | 54 +-
src/calibre/devices/interface.py | 14 +-
src/calibre/devices/jetbook/driver.py | 4 +-
src/calibre/devices/kindle/apnx.py | 8 +-
.../generators/accurate_page_generator.py | 6 +-
.../generators/exact_page_generator.py | 6 +-
.../generators/fast_page_generator.py | 4 +-
.../generators/pagebreak_page_generator.py | 4 +-
.../apnx_page_generator/i_page_generator.py | 4 +-
.../kindle/apnx_page_generator/page_group.py | 6 +-
.../apnx_page_generator/page_number_type.py | 6 +-
.../kindle/apnx_page_generator/pages.py | 2 +-
src/calibre/devices/kindle/bookmark.py | 14 +-
src/calibre/devices/kindle/driver.py | 8 +-
src/calibre/devices/kobo/bookmark.py | 14 +-
src/calibre/devices/kobo/books.py | 60 +-
src/calibre/devices/kobo/driver.py | 496 +++---
src/calibre/devices/kobo/kobotouch_config.py | 92 +-
src/calibre/devices/mtp/unix/driver.py | 2 +-
src/calibre/devices/mtp/windows/driver.py | 2 +-
src/calibre/devices/paladin/driver.py | 32 +-
src/calibre/devices/prs505/sony_cache.py | 18 +-
src/calibre/devices/prst1/driver.py | 34 +-
.../devices/smart_device_app/driver.py | 34 +-
src/calibre/devices/usbms/device.py | 2 +-
src/calibre/devices/usbms/driver.py | 2 +-
src/calibre/devices/usbms/hal.py | 10 +-
src/calibre/devices/user_defined/driver.py | 2 +-
src/calibre/devices/utils.py | 8 +-
src/calibre/devices/winusb.py | 22 +-
src/calibre/ebooks/chardet.py | 2 +-
src/calibre/ebooks/chm/metadata.py | 6 +-
src/calibre/ebooks/chm/reader.py | 8 +-
src/calibre/ebooks/conversion/cli.py | 4 +-
.../ebooks/conversion/plugins/chm_input.py | 2 +-
.../ebooks/conversion/plugins/comic_input.py | 6 +-
.../ebooks/conversion/plugins/djvu_input.py | 2 +-
.../ebooks/conversion/plugins/epub_input.py | 2 +-
.../ebooks/conversion/plugins/epub_output.py | 2 +-
.../ebooks/conversion/plugins/fb2_input.py | 6 +-
.../ebooks/conversion/plugins/html_output.py | 2 +-
.../ebooks/conversion/plugins/htmlz_output.py | 2 +-
.../ebooks/conversion/plugins/lrf_output.py | 2 +-
.../ebooks/conversion/plugins/mobi_output.py | 4 +-
.../ebooks/conversion/plugins/snb_input.py | 10 +-
.../ebooks/conversion/plugins/snb_output.py | 60 +-
.../ebooks/conversion/plugins/txt_input.py | 6 +-
.../ebooks/conversion/plugins/txt_output.py | 6 +-
src/calibre/ebooks/conversion/plumber.py | 2 +-
src/calibre/ebooks/conversion/preprocess.py | 26 +-
src/calibre/ebooks/conversion/utils.py | 164 +-
src/calibre/ebooks/covers.py | 2 +-
src/calibre/ebooks/djvu/djvubzzdec.py | 18 +-
src/calibre/ebooks/docx/index.py | 6 +-
src/calibre/ebooks/docx/to_html.py | 2 +-
src/calibre/ebooks/docx/writer/container.py | 36 +-
src/calibre/ebooks/docx/writer/fonts.py | 2 +-
src/calibre/ebooks/docx/writer/images.py | 10 +-
src/calibre/ebooks/docx/writer/links.py | 2 +-
src/calibre/ebooks/docx/writer/lists.py | 2 +-
src/calibre/ebooks/docx/writer/tables.py | 2 +-
src/calibre/ebooks/epub/pages.py | 2 +-
src/calibre/ebooks/html_transform_rules.py | 4 +-
src/calibre/ebooks/hyphenate.py | 20 +-
src/calibre/ebooks/lit/maps/__init__.py | 4 +-
src/calibre/ebooks/lit/maps/html.py | 1420 ++++++++---------
src/calibre/ebooks/lit/maps/opf.py | 106 +-
src/calibre/ebooks/lit/mssha1.py | 36 +-
src/calibre/ebooks/lit/reader.py | 76 +-
src/calibre/ebooks/lit/writer.py | 54 +-
src/calibre/ebooks/lrf/__init__.py | 10 +-
src/calibre/ebooks/lrf/html/__init__.py | 8 +-
src/calibre/ebooks/lrf/html/convert_from.py | 118 +-
src/calibre/ebooks/lrf/html/table.py | 2 +-
src/calibre/ebooks/lrf/input.py | 6 +-
src/calibre/ebooks/lrf/lrfparser.py | 6 +-
src/calibre/ebooks/lrf/meta.py | 242 +--
src/calibre/ebooks/lrf/objects.py | 64 +-
src/calibre/ebooks/lrf/pylrs/__init__.py | 4 +-
src/calibre/ebooks/lrf/pylrs/elements.py | 12 +-
src/calibre/ebooks/lrf/pylrs/pylrf.py | 164 +-
src/calibre/ebooks/lrf/pylrs/pylrfopt.py | 6 +-
src/calibre/ebooks/lrf/pylrs/pylrs.py | 642 ++++----
src/calibre/ebooks/lrf/tags.py | 80 +-
src/calibre/ebooks/metadata/__init__.py | 6 +-
src/calibre/ebooks/metadata/book/base.py | 4 +-
.../ebooks/metadata/book/json_codec.py | 6 +-
src/calibre/ebooks/metadata/book/render.py | 2 +-
src/calibre/ebooks/metadata/epub.py | 22 +-
src/calibre/ebooks/metadata/ereader.py | 4 +-
src/calibre/ebooks/metadata/fb2.py | 2 +-
src/calibre/ebooks/metadata/imp.py | 6 +-
src/calibre/ebooks/metadata/kdl.py | 2 +-
src/calibre/ebooks/metadata/kfx.py | 14 +-
src/calibre/ebooks/metadata/mobi.py | 28 +-
src/calibre/ebooks/metadata/odt.py | 2 +-
src/calibre/ebooks/metadata/opf2.py | 12 +-
src/calibre/ebooks/metadata/opf3.py | 2 +-
src/calibre/ebooks/metadata/pdb.py | 4 +-
src/calibre/ebooks/metadata/pml.py | 2 +-
src/calibre/ebooks/metadata/rb.py | 8 +-
src/calibre/ebooks/metadata/rtf.py | 14 +-
src/calibre/ebooks/metadata/snb.py | 2 +-
src/calibre/ebooks/metadata/sources/amazon.py | 10 +-
src/calibre/ebooks/metadata/sources/base.py | 4 +-
.../ebooks/metadata/sources/edelweiss.py | 6 +-
src/calibre/ebooks/metadata/sources/test.py | 12 +-
src/calibre/ebooks/metadata/toc.py | 4 +-
src/calibre/ebooks/metadata/topaz.py | 24 +-
src/calibre/ebooks/mobi/debug/headers.py | 2 +-
src/calibre/ebooks/mobi/langcodes.py | 194 +--
src/calibre/ebooks/mobi/mobiml.py | 6 +-
src/calibre/ebooks/mobi/reader/index.py | 4 +-
src/calibre/ebooks/mobi/reader/markup.py | 18 +-
src/calibre/ebooks/mobi/reader/mobi6.py | 4 +-
src/calibre/ebooks/mobi/reader/mobi8.py | 6 +-
src/calibre/ebooks/mobi/reader/ncx.py | 4 +-
src/calibre/ebooks/mobi/writer2/serializer.py | 2 +-
src/calibre/ebooks/mobi/writer8/exth.py | 2 +-
src/calibre/ebooks/mobi/writer8/main.py | 2 +-
src/calibre/ebooks/odt/input.py | 6 +-
src/calibre/ebooks/oeb/base.py | 100 +-
src/calibre/ebooks/oeb/polish/check/links.py | 4 +-
src/calibre/ebooks/oeb/polish/container.py | 8 +-
src/calibre/ebooks/oeb/polish/cover.py | 2 +-
src/calibre/ebooks/oeb/polish/create.py | 2 +-
.../ebooks/oeb/polish/tests/parsing.py | 2 +-
src/calibre/ebooks/oeb/polish/toc.py | 4 +-
src/calibre/ebooks/oeb/reader.py | 24 +-
src/calibre/ebooks/oeb/stylizer.py | 8 +-
.../ebooks/oeb/transforms/filenames.py | 2 +-
src/calibre/ebooks/oeb/transforms/flatcss.py | 20 +-
src/calibre/ebooks/oeb/transforms/htmltoc.py | 8 +-
src/calibre/ebooks/oeb/transforms/jacket.py | 6 +-
.../ebooks/oeb/transforms/manglecase.py | 4 +-
.../ebooks/oeb/transforms/rasterize.py | 4 +-
.../ebooks/oeb/transforms/structure.py | 2 +-
src/calibre/ebooks/oeb/writer.py | 16 +-
src/calibre/ebooks/pdb/haodoo/reader.py | 52 +-
src/calibre/ebooks/pdf/html_writer.py | 8 +-
src/calibre/ebooks/pdf/pdftohtml.py | 4 +-
src/calibre/ebooks/pdf/render/common.py | 2 +-
src/calibre/ebooks/pdf/render/graphics.py | 272 ++--
src/calibre/ebooks/readability/cleaners.py | 14 +-
src/calibre/ebooks/readability/debug.py | 4 +-
src/calibre/ebooks/readability/readability.py | 82 +-
src/calibre/ebooks/render_html.py | 2 +-
src/calibre/ebooks/rtf/preprocess.py | 18 +-
src/calibre/ebooks/rtf2xml/ParseRtf.py | 34 +-
src/calibre/ebooks/rtf2xml/add_brackets.py | 42 +-
src/calibre/ebooks/rtf2xml/body_styles.py | 14 +-
src/calibre/ebooks/rtf2xml/border_parse.py | 8 +-
src/calibre/ebooks/rtf2xml/char_set.py | 4 +-
src/calibre/ebooks/rtf2xml/check_brackets.py | 6 +-
src/calibre/ebooks/rtf2xml/colors.py | 40 +-
src/calibre/ebooks/rtf2xml/combine_borders.py | 6 +-
src/calibre/ebooks/rtf2xml/configure_txt.py | 4 +-
src/calibre/ebooks/rtf2xml/convert_to_tags.py | 46 +-
src/calibre/ebooks/rtf2xml/copy.py | 18 +-
.../ebooks/rtf2xml/default_encoding.py | 4 +-
src/calibre/ebooks/rtf2xml/delete_info.py | 28 +-
src/calibre/ebooks/rtf2xml/field_strings.py | 100 +-
src/calibre/ebooks/rtf2xml/fields_large.py | 38 +-
src/calibre/ebooks/rtf2xml/fields_small.py | 46 +-
src/calibre/ebooks/rtf2xml/fonts.py | 30 +-
src/calibre/ebooks/rtf2xml/footnote.py | 50 +-
src/calibre/ebooks/rtf2xml/get_char_map.py | 4 +-
src/calibre/ebooks/rtf2xml/get_options.py | 24 +-
src/calibre/ebooks/rtf2xml/group_borders.py | 26 +-
src/calibre/ebooks/rtf2xml/group_styles.py | 26 +-
src/calibre/ebooks/rtf2xml/header.py | 52 +-
.../ebooks/rtf2xml/headings_to_sections.py | 24 +-
src/calibre/ebooks/rtf2xml/hex_2_utf8.py | 74 +-
src/calibre/ebooks/rtf2xml/info.py | 42 +-
src/calibre/ebooks/rtf2xml/inline.py | 50 +-
src/calibre/ebooks/rtf2xml/line_endings.py | 4 +-
src/calibre/ebooks/rtf2xml/list_numbers.py | 40 +-
src/calibre/ebooks/rtf2xml/list_table.py | 64 +-
src/calibre/ebooks/rtf2xml/make_lists.py | 44 +-
src/calibre/ebooks/rtf2xml/old_rtf.py | 8 +-
src/calibre/ebooks/rtf2xml/options_trem.py | 32 +-
src/calibre/ebooks/rtf2xml/output.py | 20 +-
src/calibre/ebooks/rtf2xml/override_table.py | 32 +-
src/calibre/ebooks/rtf2xml/paragraph_def.py | 74 +-
src/calibre/ebooks/rtf2xml/paragraphs.py | 34 +-
src/calibre/ebooks/rtf2xml/pict.py | 40 +-
src/calibre/ebooks/rtf2xml/preamble_div.py | 78 +-
src/calibre/ebooks/rtf2xml/preamble_rest.py | 22 +-
src/calibre/ebooks/rtf2xml/process_tokens.py | 44 +-
.../ebooks/rtf2xml/replace_illegals.py | 10 +-
src/calibre/ebooks/rtf2xml/sections.py | 86 +-
src/calibre/ebooks/rtf2xml/styles.py | 70 +-
src/calibre/ebooks/rtf2xml/table.py | 54 +-
src/calibre/ebooks/rtf2xml/table_info.py | 14 +-
src/calibre/ebooks/rtf2xml/tokenize.py | 56 +-
src/calibre/ebooks/snb/snbfile.py | 62 +-
src/calibre/ebooks/snb/snbml.py | 42 +-
src/calibre/ebooks/textile/functions.py | 110 +-
src/calibre/ebooks/textile/unsmarten.py | 2 +-
src/calibre/ebooks/txt/processor.py | 2 +-
src/calibre/ebooks/unihandecode/__init__.py | 4 +-
src/calibre/ebooks/unihandecode/jadecoder.py | 16 +-
.../ebooks/unihandecode/unicodepoints.py | 102 +-
src/calibre/ebooks/unihandecode/unidecoder.py | 4 +-
src/calibre/gui2/__init__.py | 8 +-
src/calibre/gui2/actions/all_actions.py | 6 +-
src/calibre/gui2/actions/catalog.py | 4 +-
src/calibre/gui2/actions/delete.py | 2 +-
src/calibre/gui2/actions/device.py | 2 +-
src/calibre/gui2/actions/layout_actions.py | 4 +-
src/calibre/gui2/actions/open.py | 2 +-
src/calibre/gui2/actions/save_to_disk.py | 2 +-
src/calibre/gui2/bars.py | 8 +-
src/calibre/gui2/book_details.py | 6 +-
src/calibre/gui2/catalog/catalog_csv_xml.py | 2 +-
src/calibre/gui2/catalog/catalog_epub_mobi.py | 94 +-
src/calibre/gui2/comments_editor.py | 6 +-
src/calibre/gui2/convert/__init__.py | 6 +-
src/calibre/gui2/convert/gui_conversion.py | 2 +-
src/calibre/gui2/convert/metadata.py | 8 +-
src/calibre/gui2/convert/single.py | 32 +-
src/calibre/gui2/custom_column_widgets.py | 4 +-
src/calibre/gui2/device.py | 2 +-
.../device_drivers/tabbed_device_config.py | 40 +-
src/calibre/gui2/dialogs/add_from_isbn.py | 22 +-
src/calibre/gui2/dialogs/catalog.py | 8 +-
src/calibre/gui2/dialogs/choose_format.py | 2 +-
src/calibre/gui2/dialogs/comments_dialog.py | 4 +-
src/calibre/gui2/dialogs/confirm_delete.py | 10 +-
src/calibre/gui2/dialogs/custom_recipes.py | 20 +-
.../gui2/dialogs/data_files_manager.py | 2 +-
.../gui2/dialogs/edit_authors_dialog.py | 4 +-
src/calibre/gui2/dialogs/match_books.py | 2 +-
src/calibre/gui2/dialogs/message_box.py | 12 +-
src/calibre/gui2/dialogs/metadata_bulk.py | 48 +-
src/calibre/gui2/dialogs/multisort.py | 2 +-
src/calibre/gui2/dialogs/plugin_updater.py | 6 +-
src/calibre/gui2/dialogs/quickview.py | 2 +-
src/calibre/gui2/dialogs/restore_library.py | 2 +-
src/calibre/gui2/dialogs/scheduler.py | 58 +-
src/calibre/gui2/dialogs/search.py | 38 +-
src/calibre/gui2/dialogs/tag_list_editor.py | 6 +-
src/calibre/gui2/dialogs/template_dialog.py | 134 +-
src/calibre/gui2/font_family_chooser.py | 2 +-
src/calibre/gui2/icon_theme.py | 2 +-
src/calibre/gui2/init.py | 2 +-
src/calibre/gui2/job_indicator.py | 2 +-
src/calibre/gui2/layout.py | 10 +-
src/calibre/gui2/library/annotations.py | 2 +-
src/calibre/gui2/library/delegates.py | 2 +-
src/calibre/gui2/library/models.py | 18 +-
src/calibre/gui2/library/views.py | 2 +-
src/calibre/gui2/lrf_renderer/text.py | 10 +-
src/calibre/gui2/main.py | 2 +-
src/calibre/gui2/markdown_editor.py | 4 +-
.../gui2/markdown_syntax_highlighter.py | 96 +-
src/calibre/gui2/metadata/basic_widgets.py | 18 +-
src/calibre/gui2/metadata/single.py | 12 +-
src/calibre/gui2/metadata/single_download.py | 2 +-
src/calibre/gui2/notify.py | 10 +-
src/calibre/gui2/preferences/columns.py | 2 +-
.../gui2/preferences/create_custom_column.py | 74 +-
src/calibre/gui2/preferences/plugins.py | 2 +-
src/calibre/gui2/preferences/server.py | 4 +-
src/calibre/gui2/preferences/tweaks.py | 32 +-
src/calibre/gui2/proceed.py | 4 +-
src/calibre/gui2/qt_file_dialogs.py | 6 +-
src/calibre/gui2/search_restriction_mixin.py | 2 +-
src/calibre/gui2/splash_screen.py | 2 +-
src/calibre/gui2/store/search/search.py | 4 +-
.../gui2/store/stores/amazon_es_plugin.py | 2 +-
.../gui2/store/stores/amazon_it_plugin.py | 2 +-
src/calibre/gui2/store/stores/bn_plugin.py | 2 +-
.../gui2/store/stores/chitanka_plugin.py | 2 +-
.../gui2/store/stores/ebooks_com_plugin.py | 2 +-
.../store/stores/ebookshoppe_uk_plugin.py | 2 +-
.../gui2/store/stores/gutenberg_plugin.py | 2 +-
.../gui2/store/stores/litres_plugin.py | 2 +-
src/calibre/gui2/tag_browser/model.py | 6 +-
src/calibre/gui2/tag_browser/ui.py | 2 +-
src/calibre/gui2/tag_browser/view.py | 6 +-
src/calibre/gui2/toc/location.py | 4 +-
src/calibre/gui2/tools.py | 2 +-
src/calibre/gui2/tts/types.py | 2 +-
src/calibre/gui2/tweak_book/char_select.py | 2 +-
src/calibre/gui2/tweak_book/diff/main.py | 2 +-
src/calibre/gui2/tweak_book/diff/view.py | 2 +-
.../gui2/tweak_book/editor/syntax/css.py | 2 +-
.../tweak_book/editor/syntax/javascript.py | 4 +-
.../editor/syntax/pygments_highlighter.py | 4 +-
src/calibre/gui2/tweak_book/preferences.py | 2 +-
src/calibre/gui2/tweak_book/preview.py | 2 +-
src/calibre/gui2/viewer/lookup.py | 2 +-
src/calibre/gui2/viewer/ui.py | 8 +-
src/calibre/gui2/viewer/web_view.py | 6 +-
src/calibre/gui2/widgets.py | 104 +-
src/calibre/gui2/win_file_dialogs.py | 2 +-
src/calibre/gui2/wizard/__init__.py | 6 +-
src/calibre/gui2/wizard/send_email.py | 6 +-
src/calibre/library/catalogs/bibtex.py | 42 +-
src/calibre/library/catalogs/csv_xml.py | 10 +-
src/calibre/library/catalogs/epub_mobi.py | 70 +-
.../library/catalogs/epub_mobi_builder.py | 924 +++++------
src/calibre/library/catalogs/utils.py | 70 +-
src/calibre/library/coloring.py | 24 +-
src/calibre/library/database2.py | 10 +-
src/calibre/library/prefs.py | 4 +-
src/calibre/libunzip.py | 4 +-
src/calibre/linux.py | 20 +-
src/calibre/ptempfile.py | 16 +-
src/calibre/rpdb.py | 8 +-
src/calibre/scraper/test_fetch_backend.py | 2 +-
src/calibre/spell/import_from.py | 4 +-
src/calibre/srv/ajax.py | 4 +-
src/calibre/srv/auth.py | 10 +-
src/calibre/srv/http_request.py | 28 +-
src/calibre/srv/http_response.py | 26 +-
src/calibre/srv/legacy.py | 8 +-
src/calibre/srv/loop.py | 14 +-
src/calibre/srv/metadata.py | 2 +-
src/calibre/srv/opds.py | 12 +-
src/calibre/srv/pre_activated.py | 8 +-
src/calibre/srv/routes.py | 2 +-
src/calibre/srv/standalone.py | 2 +-
src/calibre/srv/tests/content.py | 8 +-
src/calibre/srv/tests/http.py | 2 +-
src/calibre/srv/tests/web_sockets.py | 6 +-
src/calibre/srv/utils.py | 28 +-
src/calibre/srv/web_socket.py | 8 +-
src/calibre/test_build.py | 6 +-
src/calibre/translations/msgfmt.py | 22 +-
src/calibre/utils/bibtex.py | 24 +-
src/calibre/utils/certgen.py | 2 +-
src/calibre/utils/cleantext.py | 6 +-
src/calibre/utils/config.py | 22 +-
src/calibre/utils/config_base.py | 4 +-
src/calibre/utils/ffml_processor.py | 28 +-
src/calibre/utils/fonts/sfnt/__init__.py | 4 +-
src/calibre/utils/fonts/sfnt/cff/constants.py | 186 +--
src/calibre/utils/fonts/sfnt/cff/dict_data.py | 46 +-
src/calibre/utils/fonts/sfnt/cff/table.py | 2 +-
src/calibre/utils/fonts/sfnt/loca.py | 2 +-
src/calibre/utils/formatter.py | 62 +-
src/calibre/utils/formatter_functions.py | 16 +-
src/calibre/utils/icu_test.py | 6 +-
src/calibre/utils/img.py | 32 +-
src/calibre/utils/imghdr.py | 26 +-
src/calibre/utils/inotify.py | 10 +-
src/calibre/utils/iphlpapi.py | 8 +-
src/calibre/utils/ipython.py | 10 +-
src/calibre/utils/iso8601.py | 2 +-
src/calibre/utils/linux_trash.py | 12 +-
src/calibre/utils/matcher.py | 2 +-
src/calibre/utils/mem.py | 8 +-
src/calibre/utils/mreplace.py | 4 +-
src/calibre/utils/network.py | 4 +-
src/calibre/utils/opensearch/description.py | 2 +-
src/calibre/utils/podofo/__init__.py | 2 +-
src/calibre/utils/rapydscript.py | 2 +-
src/calibre/utils/search_query_parser.py | 2 +-
src/calibre/utils/shm.py | 10 +-
src/calibre/utils/smartypants.py | 230 +--
src/calibre/utils/smtp.py | 2 +-
src/calibre/utils/smtplib.py | 180 +--
src/calibre/utils/terminal.py | 24 +-
src/calibre/utils/text2int.py | 26 +-
src/calibre/utils/threadpool.py | 72 +-
src/calibre/utils/titlecase.py | 14 +-
src/calibre/utils/unicode_names.py | 2 +-
src/calibre/utils/winreg/dde.py | 2 +-
src/calibre/utils/winreg/lib.py | 2 +-
src/calibre/utils/wordcount.py | 18 +-
src/calibre/utils/zipfile.py | 278 ++--
src/calibre/web/__init__.py | 6 +-
src/calibre/web/feeds/__init__.py | 2 +-
src/calibre/web/feeds/news.py | 8 +-
src/calibre/web/feeds/templates.py | 20 +-
src/odf/attrconverters.py | 62 +-
src/odf/easyliststyle.py | 22 +-
src/odf/element.py | 142 +-
src/odf/grammar.py | 4 +-
src/odf/load.py | 8 +-
src/odf/namespaces.py | 84 +-
src/odf/odf2moinmoin.py | 246 +--
src/odf/odf2xhtml.py | 832 +++++-----
src/odf/odfmanifest.py | 12 +-
src/odf/office.py | 10 +-
src/odf/opendocument.py | 142 +-
src/odf/teletype.py | 24 +-
src/odf/thumbnail.py | 8 +-
src/odf/userfield.py | 36 +-
src/qt/__init__.py | 2 +-
750 files changed, 8704 insertions(+), 8698 deletions(-)
diff --git a/manual/custom.py b/manual/custom.py
index ee8f429cdd..cd588b5c28 100644
--- a/manual/custom.py
+++ b/manual/custom.py
@@ -240,14 +240,14 @@ def generate_ebook_convert_help(preamble, app):
parser, plumber = create_option_parser(['ebook-convert',
'dummyi.'+sorted(pl.file_types)[0], 'dummyo.epub', '-h'], default_log)
groups = [(pl.name+ ' Options', '', g.option_list) for g in
- parser.option_groups if g.title == "INPUT OPTIONS"]
+ parser.option_groups if g.title == 'INPUT OPTIONS']
prog = 'ebook-convert-'+(pl.name.lower().replace(' ', '-'))
raw += '\n\n' + '\n'.join(render_options(prog, groups, False, True))
for pl in sorted(output_format_plugins(), key=lambda x: x.name):
parser, plumber = create_option_parser(['ebook-convert', 'd.epub',
'dummyi.'+pl.file_type, '-h'], default_log)
groups = [(pl.name+ ' Options', '', g.option_list) for g in
- parser.option_groups if g.title == "OUTPUT OPTIONS"]
+ parser.option_groups if g.title == 'OUTPUT OPTIONS']
prog = 'ebook-convert-'+(pl.name.lower().replace(' ', '-'))
raw += '\n\n' + '\n'.join(render_options(prog, groups, False, True))
diff --git a/manual/plugin_examples/interface_demo/main.py b/manual/plugin_examples/interface_demo/main.py
index c3c35f714f..2e6fd25df7 100644
--- a/manual/plugin_examples/interface_demo/main.py
+++ b/manual/plugin_examples/interface_demo/main.py
@@ -55,7 +55,7 @@ class DemoDialog(QDialog):
self.l.addWidget(self.view_button)
self.update_metadata_button = QPushButton(
- 'Update metadata in a book\'s files', self)
+ "Update metadata in a book's files", self)
self.update_metadata_button.clicked.connect(self.update_metadata)
self.l.addWidget(self.update_metadata_button)
diff --git a/recipes/1843.recipe b/recipes/1843.recipe
index 3361c721ed..81c1437ee4 100644
--- a/recipes/1843.recipe
+++ b/recipes/1843.recipe
@@ -61,7 +61,7 @@ if use_archive:
body = root.xpath('//body')[0]
article = E(body, 'article')
E(article, 'div', data['flyTitle'] , style='color: red; font-size:small; font-weight:bold;')
- E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
+ E(article, 'h1', data['title'], title=safe_dict(data, 'url', 'canonical') or '')
E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
try:
date = data['dateModified']
@@ -157,7 +157,7 @@ class Economist(BasicNewsRecipe):
encoding = 'utf-8'
masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png'
- __author__ = "Kovid Goyal"
+ __author__ = 'Kovid Goyal'
description = (
'Published since September 1843 to take part in “a severe contest between intelligence, which presses forward, and '
'an unworthy, timid ignorance obstructing our progress.”'
@@ -170,7 +170,7 @@ class Economist(BasicNewsRecipe):
resolve_internal_links = True
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent', 'aside', 'footer']),
- dict(attrs={'aria-label': "Article Teaser"}),
+ dict(attrs={'aria-label': 'Article Teaser'}),
dict(attrs={
'class': [
'dblClkTrk', 'ec-article-info', 'share_inline_header',
@@ -224,7 +224,7 @@ class Economist(BasicNewsRecipe):
def parse_index(self):
# return self.economist_test_article()
soup = self.index_to_soup('https://www.economist.com/hidden-content/1843magazine-hub')
- script_tag = soup.find("script", id="__NEXT_DATA__")
+ script_tag = soup.find('script', id='__NEXT_DATA__')
if script_tag is None:
raise ValueError('No script tag with JSON data found in the weeklyedition archive')
data = json.loads(script_tag.string)
@@ -247,20 +247,20 @@ class Economist(BasicNewsRecipe):
self.description = data['description']
feeds_dict = defaultdict(list)
- for part in safe_dict(data, "hasPart", "parts"):
+ for part in safe_dict(data, 'hasPart', 'parts'):
section = part['title']
self.log(section)
- for art in safe_dict(part, "hasPart", "parts"):
- title = safe_dict(art, "title")
- desc = safe_dict(art, "rubric") or ''
- sub = safe_dict(art, "flyTitle") or ''
+ for art in safe_dict(part, 'hasPart', 'parts'):
+ title = safe_dict(art, 'title')
+ desc = safe_dict(art, 'rubric') or ''
+ sub = safe_dict(art, 'flyTitle') or ''
if sub and section != sub:
desc = sub + ' :: ' + desc
pt = PersistentTemporaryFile('.html')
pt.write(json.dumps(art).encode('utf-8'))
pt.close()
url = 'file:///' + pt.name
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
self.log('\t', title, '\n\t\t', desc)
return [(section, articles) for section, articles in feeds_dict.items()]
@@ -311,26 +311,26 @@ class Economist(BasicNewsRecipe):
return ans
def economist_parse_index(self, soup):
- script_tag = soup.find("script", id="__NEXT_DATA__")
+ script_tag = soup.find('script', id='__NEXT_DATA__')
if script_tag is not None:
data = json.loads(script_tag.string)
# open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
- self.title = safe_dict(data, "props", "pageProps", "content", "headline")
+ self.title = safe_dict(data, 'props', 'pageProps', 'content', 'headline')
# self.cover_url = 'https://mma.prnewswire.com/media/2275620/The_Economist_The_World_Ahead_2024.jpg?w=600'
feeds = []
- for coll in safe_dict(data, "props", "pageProps", "content", "collections"):
- section = safe_dict(coll, "headline") or ''
+ for coll in safe_dict(data, 'props', 'pageProps', 'content', 'collections'):
+ section = safe_dict(coll, 'headline') or ''
self.log(section)
articles = []
- for part in safe_dict(coll, "hasPart", "parts"):
- title = safe_dict(part, "headline") or ''
- url = safe_dict(part, "url", "canonical") or ''
+ for part in safe_dict(coll, 'hasPart', 'parts'):
+ title = safe_dict(part, 'headline') or ''
+ url = safe_dict(part, 'url', 'canonical') or ''
if not title or not url:
continue
- desc = safe_dict(part, "description") or ''
- sub = safe_dict(part, "subheadline") or ''
+ desc = safe_dict(part, 'description') or ''
+ sub = safe_dict(part, 'subheadline') or ''
if sub:
desc = sub + ' :: ' + desc
self.log('\t', title, '\n\t', desc, '\n\t\t', url)
diff --git a/recipes/20_minutos.recipe b/recipes/20_minutos.recipe
index fd0d2d029f..02e447c633 100644
--- a/recipes/20_minutos.recipe
+++ b/recipes/20_minutos.recipe
@@ -47,11 +47,11 @@ class AdvancedUserRecipe1294946868(BasicNewsRecipe):
dict(name='ol', attrs={'class': ['navigation', ]}), dict(name='span', attrs={'class': ['action']}), dict(name='div', attrs={'class': ['twitter comments-list hidden', 'related-news', 'col', 'photo-gallery', 'photo-gallery side-art-block', 'calendario', 'article-comment', 'postto estirar', 'otras_vinetas estirar', 'kment', 'user-actions']}), dict( name='div', attrs={'id': ['twitter-destacados', 'eco-tabs', 'inner', 'vineta_calendario', 'vinetistas clearfix', 'otras_vinetas estirar', 'MIN1', 'main', 'SUP1', 'INT']}), dict(name='ul', attrs={'class': ['article-user-actions', 'stripped-list']}), dict(name='ul', attrs={'id': ['site-links']}), dict(name='li', attrs={'class': ['puntuacion', 'enviar', 'compartir']}) # noqa: E501
]
- extra_css = """
+ extra_css = '''
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h3{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
- """
+ '''
preprocess_regexps = [(re.compile(
r'', re.DOTALL), lambda m: '')]
diff --git a/recipes/DrawAndCook.recipe b/recipes/DrawAndCook.recipe
index 95a74b8cfa..7916fe512b 100644
--- a/recipes/DrawAndCook.recipe
+++ b/recipes/DrawAndCook.recipe
@@ -28,7 +28,7 @@ class DrawAndCook(BasicNewsRecipe):
def parse_index(self):
feeds = []
for title, url in [
- ("They Draw and Cook", "http://www.theydrawandcook.com/")
+ ('They Draw and Cook', 'http://www.theydrawandcook.com/')
]:
articles = self.make_links(url)
if articles:
diff --git a/recipes/TheMITPressReader.recipe b/recipes/TheMITPressReader.recipe
index 84417ce803..c64226e771 100644
--- a/recipes/TheMITPressReader.recipe
+++ b/recipes/TheMITPressReader.recipe
@@ -5,11 +5,11 @@ from calibre.web.feeds.news import BasicNewsRecipe
class TheMITPressReader(BasicNewsRecipe):
- title = "The MIT Press Reader"
+ title = 'The MIT Press Reader'
__author__ = 'yodha8'
language = 'en'
- description = ("Thought-provoking excerpts, interviews and essays backed by academic rigor written by MIT Press authors."
- " This recipe pulls articles from the past 7 days.")
+ description = ('Thought-provoking excerpts, interviews and essays backed by academic rigor written by MIT Press authors.'
+ ' This recipe pulls articles from the past 7 days.')
oldest_article = 7
max_articles_per_feed = 100
auto_cleanup = True
diff --git a/recipes/abc_es.recipe b/recipes/abc_es.recipe
index f5b036b359..0f902a0202 100644
--- a/recipes/abc_es.recipe
+++ b/recipes/abc_es.recipe
@@ -47,13 +47,13 @@ class AdvancedUserRecipe1296604369(BasicNewsRecipe):
if d and isinstance(d, str):
self.oldest_article = float(d)
- extra_css = """
+ extra_css = '''
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h3{font-family: sans-serif; font-size:120%; font-weight:bold; text-align: justify; }
h2{font-family: sans-serif; font-size:100%; font-weight:bold; text-align: justify; }
h1{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
- """
+ '''
feeds = [
diff --git a/recipes/acrimed.recipe b/recipes/acrimed.recipe
index da796681e4..d3229562bd 100644
--- a/recipes/acrimed.recipe
+++ b/recipes/acrimed.recipe
@@ -28,6 +28,6 @@ class Acrimed(BasicNewsRecipe):
lambda m: '' + m.group(1) + ''),
(re.compile(r'(.*) - Acrimed \| Action Critique M.*dias
'), lambda m: '' + m.group(1) + '
')]
-    extra_css = """
+    extra_css = '''
     .chapo{font-style:italic; margin: 1em 0 0.5em}
-    """
+    '''
diff --git a/recipes/adventuregamers.recipe b/recipes/adventuregamers.recipe
index e1c5ddfd20..7a019c1512 100644
--- a/recipes/adventuregamers.recipe
+++ b/recipes/adventuregamers.recipe
@@ -21,7 +21,7 @@ class AdventureGamers(BasicNewsRecipe):
remove_javascript = True
use_embedded_content = False
INDEX = u'http://www.adventuregamers.com'
- extra_css = """
+ extra_css = '''
.pageheader_type{font-size: x-large; font-weight: bold; color: #828D74}
.pageheader_title,.page_title{font-size: xx-large; color: #394128}
.pageheader_byline{font-size: small; font-weight: bold; color: #394128}
@@ -32,7 +32,7 @@ class AdventureGamers(BasicNewsRecipe):
.score_header{font-size: large; color: #50544A}
img{margin-bottom: 1em;}
body{font-family: 'Open Sans',Helvetica,Arial,sans-serif}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/afr.recipe b/recipes/afr.recipe
index d43cb046cb..ef4e31b578 100644
--- a/recipes/afr.recipe
+++ b/recipes/afr.recipe
@@ -14,7 +14,7 @@ class afr(BasicNewsRecipe):
description = (
'For more than 65 years The Australian Financial Review has been the authority on business,'
' finance and investment news in Australia. It has a reputation for independent, award-winning '
- 'journalism and is essential reading for Australia\'s business and investor community.'
+ "journalism and is essential reading for Australia's business and investor community."
)
masthead_url = 'https://www.nineforbrands.com.au/wp-content/uploads/2020/08/AFR-DHOSP-Logo-black-RGB.png'
encoding = 'utf-8'
diff --git a/recipes/afrique_21.recipe b/recipes/afrique_21.recipe
index df24e4d17c..2c1e5ccbcc 100644
--- a/recipes/afrique_21.recipe
+++ b/recipes/afrique_21.recipe
@@ -36,9 +36,9 @@ class AfriqueXXIRecipe(BasicNewsRecipe):
'''
def default_cover(self, cover_file):
- """
+ '''
Crée une couverture personnalisée avec le logo
- """
+ '''
from qt.core import QColor, QFont, QImage, QPainter, QPen, QRect, Qt
from calibre.gui2 import ensure_app, load_builtin_fonts, pixmap_to_data
@@ -54,7 +54,7 @@ class AfriqueXXIRecipe(BasicNewsRecipe):
weekday = french_weekday[wkd]
month = french_month[today.month]
- date_str = f"{weekday} {today.day} {month} {today.year}"
+ date_str = f'{weekday} {today.day} {month} {today.year}'
edition = today.strftime('Édition de %Hh')
# Image de base
diff --git a/recipes/al_jazeera.recipe b/recipes/al_jazeera.recipe
index ed7957dccf..20817e7ca9 100644
--- a/recipes/al_jazeera.recipe
+++ b/recipes/al_jazeera.recipe
@@ -21,9 +21,9 @@ class AlJazeera(BasicNewsRecipe):
max_articles_per_feed = 100
no_stylesheets = True
use_embedded_content = False
- extra_css = """
+ extra_css = '''
body{font-family: Arial,sans-serif}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category,
'publisher': publisher, 'language': language
diff --git a/recipes/al_monitor.recipe b/recipes/al_monitor.recipe
index 9c3f214b6b..e9fad7b272 100644
--- a/recipes/al_monitor.recipe
+++ b/recipes/al_monitor.recipe
@@ -110,7 +110,7 @@ class AlMonitor(BasicNewsRecipe):
title = title[0:120] + '...'
href = link.get('href')
if not href:
- self._p("BAD HREF: " + str(link))
+ self._p('BAD HREF: ' + str(link))
return
self.queue_article_link(section, href, title)
@@ -158,7 +158,7 @@ class AlMonitor(BasicNewsRecipe):
age = (datetime.datetime.now() - date).days
if (age > self.oldest_article):
- return "too old"
+ return 'too old'
return False
def scrape_article_date(self, soup):
@@ -174,7 +174,7 @@ class AlMonitor(BasicNewsRecipe):
def date_from_string(self, datestring):
try:
# eg: Posted September 17, 2014
- dt = datetime.datetime.strptime(datestring, "Posted %B %d, %Y")
+ dt = datetime.datetime.strptime(datestring, 'Posted %B %d, %Y')
except:
dt = None
diff --git a/recipes/albert_mohler.recipe b/recipes/albert_mohler.recipe
index a85063290a..f92c54a9db 100644
--- a/recipes/albert_mohler.recipe
+++ b/recipes/albert_mohler.recipe
@@ -5,7 +5,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AlbertMohlersBlog(BasicNewsRecipe):
- title = u'Albert Mohler\'s Blog'
+ title = u"Albert Mohler's Blog"
__author__ = 'Peter Grungi'
language = 'en'
oldest_article = 90
@@ -16,5 +16,5 @@ class AlbertMohlersBlog(BasicNewsRecipe):
language = 'en'
author = 'Albert Mohler'
- feeds = [(u'Albert Mohler\'s Blog',
+ feeds = [(u"Albert Mohler's Blog",
u'http://feeds.feedburner.com/AlbertMohlersBlog?format=xml')]
diff --git a/recipes/ald.recipe b/recipes/ald.recipe
index e34e6b90ed..88d30aeac0 100644
--- a/recipes/ald.recipe
+++ b/recipes/ald.recipe
@@ -43,7 +43,7 @@ class ALD(BasicNewsRecipe):
# Extract a list of dates from the page.
# Subset this out to the list of target dates for extraction.
date_list = []
- for div in soup.findAll('div', attrs={'id': "dayheader"}):
+ for div in soup.findAll('div', attrs={'id': 'dayheader'}):
date_list.append(self.tag_to_string(div))
date_list_clean = [re.sub(r'[^\w]', ' ', date) for date in date_list]
date_list_bool = [
@@ -54,14 +54,14 @@ class ALD(BasicNewsRecipe):
# Process each paragraph one by one.
# Stop when the text of the previous div is not in the target date list.
- for div in soup.findAll('div', attrs={'class': "mobile-front"}):
+ for div in soup.findAll('div', attrs={'class': 'mobile-front'}):
for p in div.findAll('p'):
if self.tag_to_string(p.findPreviousSibling('div')) in compress_date:
if p.find('a'):
title = self.tag_to_string(p)
link = p.find('a')['href']
if self.tag_to_string(p.findPreviousSibling('h3')
- ) == "Articles of Note":
+ ) == 'Articles of Note':
articles_note.append({
'title': title,
'url': link,
@@ -69,7 +69,7 @@ class ALD(BasicNewsRecipe):
'date': ''
})
elif self.tag_to_string(p.findPreviousSibling('h3')
- ) == "New Books":
+ ) == 'New Books':
new_books.append({
'title': title,
'url': link,
diff --git a/recipes/alternatives_economiques.recipe b/recipes/alternatives_economiques.recipe
index 4845283694..d341bfc683 100644
--- a/recipes/alternatives_economiques.recipe
+++ b/recipes/alternatives_economiques.recipe
@@ -38,7 +38,7 @@ class AlternativesEconomiques(BasicNewsRecipe):
self.log('Cover URL found:', cover_url)
return cover_url
- self.log('Aucune couverture trouvée, utilisation de l\'image par défaut')
+ self.log("Aucune couverture trouvée, utilisation de l'image par défaut")
return 'https://www.alternatives-economiques.fr/sites/all/themes/alternatives-economiques-main/assets/logo-alternatives-economiques.svg'
except Exception as e:
diff --git a/recipes/am730.recipe b/recipes/am730.recipe
index c7d35ac2f9..0d79189cbf 100644
--- a/recipes/am730.recipe
+++ b/recipes/am730.recipe
@@ -58,7 +58,7 @@ class AM730(BasicNewsRecipe):
articles = []
for aTag in soup.findAll('a',attrs={'class':'newsimglink'}):
href = aTag.get('href',False)
- if not href.encode("utf-8").startswith(url.encode("utf-8")) :
+ if not href.encode('utf-8').startswith(url.encode('utf-8')) :
continue # not in same section
title = href.split('/')[-1].split('-')[0]
diff --git a/recipes/ambito.recipe b/recipes/ambito.recipe
index 7a5a177cc6..ad83cc6605 100644
--- a/recipes/ambito.recipe
+++ b/recipes/ambito.recipe
@@ -28,9 +28,9 @@ class Ambito(BasicNewsRecipe):
language = 'es_AR'
publication_type = 'newsportal'
masthead_url = 'https://www.ambito.com/css-custom/239/images/logo-239-2020v2.svg'
- extra_css = """
+ extra_css = '''
body{font-family: Roboto, sans-serif}
- """
+ '''
conversion_options = {
'comment': description,
diff --git a/recipes/american_thinker.recipe b/recipes/american_thinker.recipe
index 0f1480c06a..b054407f86 100644
--- a/recipes/american_thinker.recipe
+++ b/recipes/american_thinker.recipe
@@ -12,7 +12,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AmericanThinker(BasicNewsRecipe):
title = u'American Thinker'
- description = "American Thinker is a daily internet publication devoted to the thoughtful exploration of issues of importance to Americans."
+ description = 'American Thinker is a daily internet publication devoted to the thoughtful exploration of issues of importance to Americans.'
__author__ = 'Walt Anthony'
publisher = 'Thomas Lifson'
category = 'news, politics, USA'
diff --git a/recipes/anandtech.recipe b/recipes/anandtech.recipe
index aa29ed443c..8ca0011757 100644
--- a/recipes/anandtech.recipe
+++ b/recipes/anandtech.recipe
@@ -39,4 +39,4 @@ class anan(BasicNewsRecipe):
def print_version(self, url):
# return url.replace("0Cshow0C", "0Cprint0C") # 2013-09-07 AGE: update
- return url.replace("/show/", "/print/") # 2014-02-27 AGE: update
+ return url.replace('/show/', '/print/') # 2014-02-27 AGE: update
diff --git a/recipes/ancient_egypt.recipe b/recipes/ancient_egypt.recipe
index c40b0aa3cc..d88a181ea3 100644
--- a/recipes/ancient_egypt.recipe
+++ b/recipes/ancient_egypt.recipe
@@ -12,7 +12,7 @@ class ancientegypt(BasicNewsRecipe):
language = 'en'
__author__ = 'unkn0wn'
description = (
- 'Ancient Egypt is the world\'s leading Egyptology magazine, exploring the history, people and culture of the Nile Valley. '
+ "Ancient Egypt is the world's leading Egyptology magazine, exploring the history, people and culture of the Nile Valley. "
'Now in a larger format with a fresh new design, AE brings you the latest news and discoveries, and feature articles covering '
'more than 5000 years of Egyptian history. Published bimonthly.'
)
diff --git a/recipes/andhrajyothy_ap.recipe b/recipes/andhrajyothy_ap.recipe
index 041098c8ca..f2833ddb18 100644
--- a/recipes/andhrajyothy_ap.recipe
+++ b/recipes/andhrajyothy_ap.recipe
@@ -75,7 +75,7 @@ class andhra(BasicNewsRecipe):
url = str(snaps['OrgId'])
if snaps['ObjectType'] == 4:
continue
- feeds_dict[section].append({"title": '', "url": url})
+ feeds_dict[section].append({'title': '', 'url': url})
return [(section, articles) for section, articles in feeds_dict.items()]
def preprocess_raw_html(self, raw, *a):
diff --git a/recipes/andhrajyothy_tel.recipe b/recipes/andhrajyothy_tel.recipe
index 37cba5ebfb..ff7195112b 100644
--- a/recipes/andhrajyothy_tel.recipe
+++ b/recipes/andhrajyothy_tel.recipe
@@ -75,7 +75,7 @@ class andhra(BasicNewsRecipe):
url = str(snaps['OrgId'])
if snaps['ObjectType'] == 4:
continue
- feeds_dict[section].append({"title": '', "url": url})
+ feeds_dict[section].append({'title': '', 'url': url})
return [(section, articles) for section, articles in feeds_dict.items()]
def preprocess_raw_html(self, raw, *a):
diff --git a/recipes/arcamax.recipe b/recipes/arcamax.recipe
index c255442669..aef1ca6a26 100644
--- a/recipes/arcamax.recipe
+++ b/recipes/arcamax.recipe
@@ -66,19 +66,19 @@ class Arcamax(BasicNewsRecipe):
# (u"9 Chickweed Lane", u"https://www.arcamax.com/thefunnies/ninechickweedlane"),
# (u"Agnes", u"https://www.arcamax.com/thefunnies/agnes"),
# (u"Andy Capp", u"https://www.arcamax.com/thefunnies/andycapp"),
- (u"BC", u"https://www.arcamax.com/thefunnies/bc"),
+ (u'BC', u'https://www.arcamax.com/thefunnies/bc'),
# (u"Baby Blues", u"https://www.arcamax.com/thefunnies/babyblues"),
# (u"Beetle Bailey", u"https://www.arcamax.com/thefunnies/beetlebailey"),
- (u"Blondie", u"https://www.arcamax.com/thefunnies/blondie"),
+ (u'Blondie', u'https://www.arcamax.com/thefunnies/blondie'),
# u"Boondocks", u"https://www.arcamax.com/thefunnies/boondocks"),
# (u"Cathy", u"https://www.arcamax.com/thefunnies/cathy"),
# (u"Daddys Home", u"https://www.arcamax.com/thefunnies/daddyshome"),
# (u"Dinette Set", u"https://www.arcamax.com/thefunnies/thedinetteset"),
- (u"Dog Eat Doug", u"https://www.arcamax.com/thefunnies/dogeatdoug"),
+ (u'Dog Eat Doug', u'https://www.arcamax.com/thefunnies/dogeatdoug'),
# (u"Doonesbury", u"https://www.arcamax.com/thefunnies/doonesbury"),
# (u"Dustin", u"https://www.arcamax.com/thefunnies/dustin"),
- (u"Family Circus", u"https://www.arcamax.com/thefunnies/familycircus"),
- (u"Garfield", u"https://www.arcamax.com/thefunnies/garfield"),
+ (u'Family Circus', u'https://www.arcamax.com/thefunnies/familycircus'),
+ (u'Garfield', u'https://www.arcamax.com/thefunnies/garfield'),
# (u"Get Fuzzy", u"https://www.arcamax.com/thefunnies/getfuzzy"),
# (u"Girls and Sports", u"https://www.arcamax.com/thefunnies/girlsandsports"),
# (u"Hagar the Horrible", u"https://www.arcamax.com/thefunnies/hagarthehorrible"),
@@ -87,16 +87,16 @@ class Arcamax(BasicNewsRecipe):
# (u"Luann", u"https://www.arcamax.com/thefunnies/luann"),
# (u"Momma", u"https://www.arcamax.com/thefunnies/momma"),
# (u"Mother Goose and Grimm", u"https://www.arcamax.com/thefunnies/mothergooseandgrimm"),
- (u"Mutts", u"https://www.arcamax.com/thefunnies/mutts"),
+ (u'Mutts', u'https://www.arcamax.com/thefunnies/mutts'),
# (u"Non Sequitur", u"https://www.arcamax.com/thefunnies/nonsequitur"),
# (u"Pearls Before Swine", u"https://www.arcamax.com/thefunnies/pearlsbeforeswine"),
# (u"Pickles", u"https://www.arcamax.com/thefunnies/pickles"),
# (u"Red and Rover", u"https://www.arcamax.com/thefunnies/redandrover"),
# (u"Rubes", u"https://www.arcamax.com/thefunnies/rubes"),
# (u"Rugrats", u"https://www.arcamax.com/thefunnies/rugrats"),
- (u"Speed Bump", u"https://www.arcamax.com/thefunnies/speedbump"),
- (u"Wizard of Id", u"https://www.arcamax.com/thefunnies/wizardofid"),
- (u"Zits", u"https://www.arcamax.com/thefunnies/zits"),
+ (u'Speed Bump', u'https://www.arcamax.com/thefunnies/speedbump'),
+ (u'Wizard of Id', u'https://www.arcamax.com/thefunnies/wizardofid'),
+ (u'Zits', u'https://www.arcamax.com/thefunnies/zits'),
]:
self.log('Finding strips for:', title)
articles = self.make_links(url, title)
diff --git a/recipes/arret_sur_images.recipe b/recipes/arret_sur_images.recipe
index d4b3520430..70a2a21fbc 100644
--- a/recipes/arret_sur_images.recipe
+++ b/recipes/arret_sur_images.recipe
@@ -13,7 +13,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class ArretSurImages(BasicNewsRecipe):
title = 'Arrêt sur Images'
- description = 'Site français d\'analyse des médias'
+ description = "Site français d'analyse des médias"
language = 'fr'
encoding = 'utf-8'
needs_subscription = True
@@ -27,9 +27,9 @@ class ArretSurImages(BasicNewsRecipe):
]
def default_cover(self, cover_file):
- """
+ '''
Crée une couverture personnalisée avec le logo ASI
- """
+ '''
from qt.core import QColor, QFont, QImage, QPainter, QPen, QRect, Qt
from calibre.gui2 import ensure_app, load_builtin_fonts, pixmap_to_data
@@ -45,7 +45,7 @@ class ArretSurImages(BasicNewsRecipe):
weekday = french_weekday[wkd]
month = french_month[today.month]
- date_str = f"{weekday} {today.day} {month} {today.year}"
+ date_str = f'{weekday} {today.day} {month} {today.year}'
edition = today.strftime('Édition de %Hh')
img = QImage(1400, 1920, QImage.Format_RGB888)
@@ -123,9 +123,9 @@ class ArretSurImages(BasicNewsRecipe):
br.addheaders += [('Authorization', f'Bearer {auth_response["access_token"]}')]
print('Authentification réussie')
else:
- print('Échec de l\'authentification - Vérifiez vos identifiants')
+ print("Échec de l'authentification - Vérifiez vos identifiants")
except Exception as e:
- print(f'Erreur lors de l\'authentification: {str(e)}')
+ print(f"Erreur lors de l'authentification: {str(e)}")
return br
def get_article_url(self, article):
diff --git a/recipes/asahi_shimbun_en.recipe b/recipes/asahi_shimbun_en.recipe
index 7cdbfbbc8b..5f539dc5b7 100644
--- a/recipes/asahi_shimbun_en.recipe
+++ b/recipes/asahi_shimbun_en.recipe
@@ -1,12 +1,12 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-__license__ = "GPL v3"
-__copyright__ = "2022, Albert Aparicio Isarn "
+__license__ = 'GPL v3'
+__copyright__ = '2022, Albert Aparicio Isarn '
-"""
+'''
https://www.asahi.com/ajw/
-"""
+'''
from datetime import datetime
@@ -14,99 +14,99 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AsahiShimbunEnglishNews(BasicNewsRecipe):
- title = "The Asahi Shimbun"
- __author__ = "Albert Aparicio Isarn"
+ title = 'The Asahi Shimbun'
+ __author__ = 'Albert Aparicio Isarn'
- description = ("The Asahi Shimbun is widely regarded for its journalism as the most respected daily newspaper in Japan."
- " The English version offers selected articles from the vernacular Asahi Shimbun, as well as extensive"
- " coverage of cool Japan,focusing on manga, travel and other timely news.")
- publisher = "The Asahi Shimbun Company"
- publication_type = "newspaper"
- category = "news, japan"
- language = "en_JP"
+ description = ('The Asahi Shimbun is widely regarded for its journalism as the most respected daily newspaper in Japan.'
+ ' The English version offers selected articles from the vernacular Asahi Shimbun, as well as extensive'
+ ' coverage of cool Japan,focusing on manga, travel and other timely news.')
+ publisher = 'The Asahi Shimbun Company'
+ publication_type = 'newspaper'
+ category = 'news, japan'
+ language = 'en_JP'
- index = "https://www.asahi.com"
- masthead_url = "https://p.potaufeu.asahi.com/ajw/css/images/en_logo@2x.png"
+ index = 'https://www.asahi.com'
+ masthead_url = 'https://p.potaufeu.asahi.com/ajw/css/images/en_logo@2x.png'
oldest_article = 3
max_articles_per_feed = 40
no_stylesheets = True
remove_javascript = True
- remove_tags_before = {"id": "MainInner"}
- remove_tags_after = {"class": "ArticleText"}
- remove_tags = [{"name": "div", "class": "SnsUtilityArea"}]
+ remove_tags_before = {'id': 'MainInner'}
+ remove_tags_after = {'class': 'ArticleText'}
+ remove_tags = [{'name': 'div', 'class': 'SnsUtilityArea'}]
def get_whats_new(self):
- soup = self.index_to_soup(self.index + "/ajw/new")
- news_section = soup.find("div", attrs={"class": "specialList"})
+ soup = self.index_to_soup(self.index + '/ajw/new')
+ news_section = soup.find('div', attrs={'class': 'specialList'})
new_news = []
- for item in news_section.findAll("li"):
- title = item.find("p", attrs={"class": "title"}).string
- date_string = item.find("p", attrs={"class": "date"}).next
+ for item in news_section.findAll('li'):
+ title = item.find('p', attrs={'class': 'title'}).string
+ date_string = item.find('p', attrs={'class': 'date'}).next
date = date_string.strip()
- url = self.index + item.find("a")["href"]
+ url = self.index + item.find('a')['href']
new_news.append(
{
- "title": title,
- "date": datetime.strptime(date, "%B %d, %Y").strftime("%Y/%m/%d"),
- "url": url,
- "description": "",
+ 'title': title,
+ 'date': datetime.strptime(date, '%B %d, %Y').strftime('%Y/%m/%d'),
+ 'url': url,
+ 'description': '',
}
)
return new_news
def get_top6(self, soup):
- top = soup.find("ul", attrs={"class": "top6"})
+ top = soup.find('ul', attrs={'class': 'top6'})
top6_news = []
- for item in top.findAll("li"):
- title = item.find("p", attrs={"class": "title"}).string
- date_string = item.find("p", attrs={"class": "date"}).next
+ for item in top.findAll('li'):
+ title = item.find('p', attrs={'class': 'title'}).string
+ date_string = item.find('p', attrs={'class': 'date'}).next
date = date_string.strip()
- url = self.index + item.find("a")["href"]
+ url = self.index + item.find('a')['href']
top6_news.append(
{
- "title": title,
- "date": datetime.strptime(date, "%B %d, %Y").strftime("%Y/%m/%d"),
- "url": url,
- "description": "",
+ 'title': title,
+ 'date': datetime.strptime(date, '%B %d, %Y').strftime('%Y/%m/%d'),
+ 'url': url,
+ 'description': '',
}
)
return top6_news
def get_section_news(self, soup):
- news_grid = soup.find("ul", attrs={"class": "default"})
+ news_grid = soup.find('ul', attrs={'class': 'default'})
news = []
- for item in news_grid.findAll("li"):
- title = item.find("p", attrs={"class": "title"}).string
- date_string = item.find("p", attrs={"class": "date"}).next
+ for item in news_grid.findAll('li'):
+ title = item.find('p', attrs={'class': 'title'}).string
+ date_string = item.find('p', attrs={'class': 'date'}).next
date = date_string.strip()
- url = self.index + item.find("a")["href"]
+ url = self.index + item.find('a')['href']
news.append(
{
- "title": title,
- "date": datetime.strptime(date, "%B %d, %Y").strftime("%Y/%m/%d"),
- "url": url,
- "description": "",
+ 'title': title,
+ 'date': datetime.strptime(date, '%B %d, %Y').strftime('%Y/%m/%d'),
+ 'url': url,
+ 'description': '',
}
)
return news
def get_section(self, section):
- soup = self.index_to_soup(self.index + "/ajw/" + section)
+ soup = self.index_to_soup(self.index + '/ajw/' + section)
section_news_items = self.get_top6(soup)
section_news_items.extend(self.get_section_news(soup))
@@ -114,26 +114,26 @@ class AsahiShimbunEnglishNews(BasicNewsRecipe):
return section_news_items
def get_special_section(self, section):
- soup = self.index_to_soup(self.index + "/ajw/" + section)
- top = soup.find("div", attrs={"class": "Section"})
+ soup = self.index_to_soup(self.index + '/ajw/' + section)
+ top = soup.find('div', attrs={'class': 'Section'})
special_news = []
- for item in top.findAll("li"):
- item_a = item.find("a")
+ for item in top.findAll('li'):
+ item_a = item.find('a')
- text_split = item_a.text.strip().split("\n")
+ text_split = item_a.text.strip().split('\n')
title = text_split[0]
description = text_split[1].strip()
- url = self.index + item_a["href"]
+ url = self.index + item_a['href']
special_news.append(
{
- "title": title,
- "date": "",
- "url": url,
- "description": description,
+ 'title': title,
+ 'date': '',
+ 'url': url,
+ 'description': description,
}
)
@@ -144,24 +144,24 @@ class AsahiShimbunEnglishNews(BasicNewsRecipe):
feeds = [
("What's New", self.get_whats_new()),
- ("National Report", self.get_section("national_report")),
- ("Politics", self.get_section("politics")),
- ("Business", self.get_section("business")),
- ("Asia & World - China", self.get_section("asia_world/china")),
- ("Asia & World - Korean Peninsula", self.get_section("asia_world/korean_peninsula")),
- ("Asia & World - Around Asia", self.get_section("asia_world/around_asia")),
- ("Asia & World - World", self.get_section("asia_world/world")),
- ("Sci & Tech", self.get_section("sci_tech")),
- ("Culture - Style", self.get_section("culture/style")),
+ ('National Report', self.get_section('national_report')),
+ ('Politics', self.get_section('politics')),
+ ('Business', self.get_section('business')),
+ ('Asia & World - China', self.get_section('asia_world/china')),
+ ('Asia & World - Korean Peninsula', self.get_section('asia_world/korean_peninsula')),
+ ('Asia & World - Around Asia', self.get_section('asia_world/around_asia')),
+ ('Asia & World - World', self.get_section('asia_world/world')),
+ ('Sci & Tech', self.get_section('sci_tech')),
+ ('Culture - Style', self.get_section('culture/style')),
# ("Culture - Cooking", self.get_section("culture/cooking")),
- ("Culture - Movies", self.get_section("culture/movies")),
- ("Culture - Manga & Anime", self.get_section("culture/manga_anime")),
- ("Travel", self.get_section("travel")),
- ("Sports", self.get_section("sports")),
- ("Opinion - Editorial", self.get_section("opinion/editorial")),
- ("Opinion - Vox Populi", self.get_section("opinion/vox")),
- ("Opinion - Views", self.get_section("opinion/views")),
- ("Special", self.get_special_section("special")),
+ ('Culture - Movies', self.get_section('culture/movies')),
+ ('Culture - Manga & Anime', self.get_section('culture/manga_anime')),
+ ('Travel', self.get_section('travel')),
+ ('Sports', self.get_section('sports')),
+ ('Opinion - Editorial', self.get_section('opinion/editorial')),
+ ('Opinion - Vox Populi', self.get_section('opinion/vox')),
+ ('Opinion - Views', self.get_section('opinion/views')),
+ ('Special', self.get_special_section('special')),
]
return feeds
diff --git a/recipes/asianreviewofbooks.recipe b/recipes/asianreviewofbooks.recipe
index dfefb5f07e..3a3565c9a3 100644
--- a/recipes/asianreviewofbooks.recipe
+++ b/recipes/asianreviewofbooks.recipe
@@ -26,11 +26,11 @@ class AsianReviewOfBooks(BasicNewsRecipe):
publication_type = 'magazine'
auto_cleanup = True
masthead_url = 'https://i2.wp.com/asianreviewofbooks.com/content/wp-content/uploads/2016/09/ARBwidelogo.png'
- extra_css = """
+ extra_css = '''
body{font-family: "Droid Serif", serif}
.entry-title {font-family: "Playfair Display", serif}
img {display: block}
- """
+ '''
recipe_specific_options = {
'days': {
diff --git a/recipes/ba_herald.recipe b/recipes/ba_herald.recipe
index 91cac54aa8..40758a84e7 100644
--- a/recipes/ba_herald.recipe
+++ b/recipes/ba_herald.recipe
@@ -24,12 +24,12 @@ class BuenosAiresHerald(BasicNewsRecipe):
publication_type = 'newspaper'
masthead_url = 'http://www.buenosairesherald.com/img/logo.jpg'
INDEX = 'http://www.buenosairesherald.com'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif }
img{margin-bottom: 0.4em; display:block}
h1{font-family: Georgia,serif}
#fecha{text-align: right; font-size: small}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/bangkokpost.recipe b/recipes/bangkokpost.recipe
index 464ad9800c..259a7bcb6c 100644
--- a/recipes/bangkokpost.recipe
+++ b/recipes/bangkokpost.recipe
@@ -16,7 +16,7 @@ class BangkokPostRecipe(BasicNewsRecipe):
title = u'Bangkok Post'
publisher = u'Post Publishing PCL'
category = u'News'
- description = u'The world\'s window to Thailand'
+ description = u"The world's window to Thailand"
oldest_article = 7
max_articles_per_feed = 100
diff --git a/recipes/barrons.recipe b/recipes/barrons.recipe
index 4ec30b325c..a80ffbcf46 100644
--- a/recipes/barrons.recipe
+++ b/recipes/barrons.recipe
@@ -8,11 +8,11 @@ from calibre.web.feeds.news import BasicNewsRecipe, classes, prefixed_classes
class barrons(BasicNewsRecipe):
- title = 'Barron\'s Magazine'
+ title = "Barron's Magazine"
__author__ = 'unkn0wn'
description = (
- 'Barron\'s is an American weekly magazine/newspaper published by Dow Jones & Company. Founded in 1921 as a sister '
- 'publication to The Wall Street Journal, Barron\'s covers U.S. financial information, market developments, and '
+ "Barron's is an American weekly magazine/newspaper published by Dow Jones & Company. Founded in 1921 as a sister "
+ "publication to The Wall Street Journal, Barron's covers U.S. financial information, market developments, and "
'relevant statistics.'
)
language = 'en_US'
@@ -82,7 +82,7 @@ class barrons(BasicNewsRecipe):
recipe_specific_options = {
'date': {
'short': 'The date of the edition to download (YYYYMMDD format)',
- 'long': 'For example, 20240722.\nIf it didn\'t work, try again later.'
+ 'long': "For example, 20240722.\nIf it didn't work, try again later."
}
}
diff --git a/recipes/bbc.recipe b/recipes/bbc.recipe
index 7302d02d3f..a872d50550 100644
--- a/recipes/bbc.recipe
+++ b/recipes/bbc.recipe
@@ -135,9 +135,9 @@ class BBCNews(BasicNewsRecipe):
# Select / de-select the feeds you want in your ebook.
feeds = [
- ("News Home", "https://feeds.bbci.co.uk/news/rss.xml"),
- ("UK", "https://feeds.bbci.co.uk/news/uk/rss.xml"),
- ("World", "https://feeds.bbci.co.uk/news/world/rss.xml"),
+ ('News Home', 'https://feeds.bbci.co.uk/news/rss.xml'),
+ ('UK', 'https://feeds.bbci.co.uk/news/uk/rss.xml'),
+ ('World', 'https://feeds.bbci.co.uk/news/world/rss.xml'),
# ("England", "https://feeds.bbci.co.uk/news/england/rss.xml"),
# ("Scotland", "https://feeds.bbci.co.uk/news/scotland/rss.xml"),
# ("Wales", "https://feeds.bbci.co.uk/news/wales/rss.xml"),
@@ -147,26 +147,26 @@ class BBCNews(BasicNewsRecipe):
# ("Europe", "https://feeds.bbci.co.uk/news/world/europe/rss.xml"),
# ("Latin America", "https://feeds.bbci.co.uk/news/world/latin_america/rss.xml"),
# ("Middle East", "https://feeds.bbci.co.uk/news/world/middle_east/rss.xml"),
- ("US & Canada", "https://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml"),
- ("Politics", "https://feeds.bbci.co.uk/news/politics/rss.xml"),
- ("Science/Environment",
- "https://feeds.bbci.co.uk/news/science_and_environment/rss.xml"),
- ("Technology", "https://feeds.bbci.co.uk/news/technology/rss.xml"),
- ("Magazine", "https://feeds.bbci.co.uk/news/magazine/rss.xml"),
- ("Entertainment/Arts",
- "https://feeds.bbci.co.uk/news/entertainment_and_arts/rss.xml"),
+ ('US & Canada', 'https://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml'),
+ ('Politics', 'https://feeds.bbci.co.uk/news/politics/rss.xml'),
+ ('Science/Environment',
+ 'https://feeds.bbci.co.uk/news/science_and_environment/rss.xml'),
+ ('Technology', 'https://feeds.bbci.co.uk/news/technology/rss.xml'),
+ ('Magazine', 'https://feeds.bbci.co.uk/news/magazine/rss.xml'),
+ ('Entertainment/Arts',
+ 'https://feeds.bbci.co.uk/news/entertainment_and_arts/rss.xml'),
# ("Health", "https://feeds.bbci.co.uk/news/health/rss.xml"),
# ("Education/Family", "https://feeds.bbci.co.uk/news/education/rss.xml"),
- ("Business", "https://feeds.bbci.co.uk/news/business/rss.xml"),
- ("Special Reports", "https://feeds.bbci.co.uk/news/special_reports/rss.xml"),
- ("Also in the News", "https://feeds.bbci.co.uk/news/also_in_the_news/rss.xml"),
+ ('Business', 'https://feeds.bbci.co.uk/news/business/rss.xml'),
+ ('Special Reports', 'https://feeds.bbci.co.uk/news/special_reports/rss.xml'),
+ ('Also in the News', 'https://feeds.bbci.co.uk/news/also_in_the_news/rss.xml'),
# ("Newsbeat", "https://www.bbc.co.uk/newsbeat/rss.xml"),
# ("Click", "http://newsrss.bbc.co.uk/rss/newsonline_uk_edition/programmes/click_online/rss.xml"),
# ("Blog: Mark D'Arcy (Parliamentary Correspondent)", "https://feeds.bbci.co.uk/news/correspondents/markdarcy/rss.sxml"),
# ("Blog: Robert Peston (Business Editor)", "https://feeds.bbci.co.uk/news/correspondents/robertpeston/rss.sxml"),
# ("Blog: Stephanie Flanders (Economics Editor)", "https://feeds.bbci.co.uk/news/correspondents/stephanieflanders/rss.sxml"),
- ("Sport Front Page",
- "http://newsrss.bbc.co.uk/rss/sportonline_uk_edition/front_page/rss.xml"),
+ ('Sport Front Page',
+ 'http://newsrss.bbc.co.uk/rss/sportonline_uk_edition/front_page/rss.xml'),
# ("Football", "http://newsrss.bbc.co.uk/rss/sportonline_uk_edition/football/rss.xml"),
# ("Cricket", "http://newsrss.bbc.co.uk/rss/sportonline_uk_edition/cricket/rss.xml"),
# ("Rugby Union", "http://newsrss.bbc.co.uk/rss/sportonline_uk_edition/rugby_union/rss.xml"),
diff --git a/recipes/bbc_brasil.recipe b/recipes/bbc_brasil.recipe
index 840c589401..6c7d4fb733 100644
--- a/recipes/bbc_brasil.recipe
+++ b/recipes/bbc_brasil.recipe
@@ -556,19 +556,19 @@ class BBCBrasilRecipe(BasicNewsRecipe):
def print_version(self, url):
# Handle sports page urls type 01:
- if (url.find("go/rss/-/sport1/") != -1):
- temp_url = url.replace("go/rss/-/", "")
+ if (url.find('go/rss/-/sport1/') != -1):
+ temp_url = url.replace('go/rss/-/', '')
# Handle sports page urls type 02:
- elif (url.find("go/rss/int/news/-/sport1/") != -1):
- temp_url = url.replace("go/rss/int/news/-/", "")
+ elif (url.find('go/rss/int/news/-/sport1/') != -1):
+ temp_url = url.replace('go/rss/int/news/-/', '')
# Handle regular news page urls:
else:
- temp_url = url.replace("go/rss/int/news/-/", "")
+ temp_url = url.replace('go/rss/int/news/-/', '')
# Always add "?print=true" to the end of the url.
- print_url = temp_url + "?print=true"
+ print_url = temp_url + '?print=true'
return print_url
diff --git a/recipes/billorielly.recipe b/recipes/billorielly.recipe
index 3369cc49e2..4223df0093 100644
--- a/recipes/billorielly.recipe
+++ b/recipes/billorielly.recipe
@@ -30,7 +30,7 @@ class BillOReilly(BasicNewsRecipe):
feeds.append(("O'Reilly Factor", articles_shows))
if articles_columns:
- feeds.append(("Newspaper Column", articles_columns))
+ feeds.append(('Newspaper Column', articles_columns))
return feeds
diff --git a/recipes/blesk.recipe b/recipes/blesk.recipe
index 8757bf4dcf..0b3380ea60 100644
--- a/recipes/blesk.recipe
+++ b/recipes/blesk.recipe
@@ -27,8 +27,8 @@ class bleskRecipe(BasicNewsRecipe):
cover_url = 'http://img.blesk.cz/images/blesk/blesk-logo.png'
remove_javascript = True
no_stylesheets = True
- extra_css = """
- """
+ extra_css = '''
+ '''
remove_attributes = []
remove_tags_before = dict(name='div', attrs={'id': ['boxContent']})
diff --git a/recipes/blic.recipe b/recipes/blic.recipe
index f674695923..fbd2e463bd 100644
--- a/recipes/blic.recipe
+++ b/recipes/blic.recipe
@@ -23,7 +23,7 @@ class Blic(BasicNewsRecipe):
masthead_url = 'http://www.blic.rs/resources/images/header/header_back.png'
language = 'sr'
publication_type = 'newspaper'
- extra_css = """
+ extra_css = '''
@font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: Georgia, serif1, serif}
@@ -35,7 +35,7 @@ class Blic(BasicNewsRecipe):
.potpis{font-size: x-small; color: gray}
.article_info{font-size: small}
img{margin-bottom: 0.8em; margin-top: 0.8em; display: block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language, 'linearize_tables': True
diff --git a/recipes/bloomberg-business-week.recipe b/recipes/bloomberg-business-week.recipe
index 8d3753fc20..58a5bb185d 100644
--- a/recipes/bloomberg-business-week.recipe
+++ b/recipes/bloomberg-business-week.recipe
@@ -56,7 +56,7 @@ class Bloomberg(BasicNewsRecipe):
masthead_url = 'https://assets.bwbx.io/s3/javelin/public/hub/images/BW-Logo-Black-cc9035fbb3.svg'
description = (
'Bloomberg Businessweek helps global leaders stay ahead with insights and in-depth analysis on the people,'
- ' companies, events, and trends shaping today\'s complex, global economy.'
+ " companies, events, and trends shaping today's complex, global economy."
)
remove_empty_feeds = True
diff --git a/recipes/bookforummagazine.recipe b/recipes/bookforummagazine.recipe
index cab082a8e3..f5e8cbdb98 100644
--- a/recipes/bookforummagazine.recipe
+++ b/recipes/bookforummagazine.recipe
@@ -2,29 +2,29 @@ from urllib.parse import urljoin
from calibre.web.feeds.news import BasicNewsRecipe
-_issue_url = ""
+_issue_url = ''
class BookforumMagazine(BasicNewsRecipe):
- title = "Bookforum"
+ title = 'Bookforum'
description = (
- "Bookforum is an American book review magazine devoted to books and "
- "the discussion of literature. https://www.bookforum.com/print"
+ 'Bookforum is an American book review magazine devoted to books and '
+ 'the discussion of literature. https://www.bookforum.com/print'
)
- language = "en"
- __author__ = "ping"
- publication_type = "magazine"
- encoding = "utf-8"
+ language = 'en'
+ __author__ = 'ping'
+ publication_type = 'magazine'
+ encoding = 'utf-8'
remove_javascript = True
no_stylesheets = True
auto_cleanup = False
compress_news_images = True
compress_news_images_auto_size = 8
- keep_only_tags = [dict(class_="blog-article")]
- remove_tags = [dict(name=["af-share-toggle", "af-related-articles"])]
+ keep_only_tags = [dict(class_='blog-article')]
+ remove_tags = [dict(name=['af-share-toggle', 'af-related-articles'])]
- extra_css = """
+ extra_css = '''
.blog-article__header { font-size: 1.8rem; margin-bottom: 0.4rem; }
.blog-article__subtitle { font-size: 1.2rem; font-style: italic; margin-bottom: 1rem; }
.blog-article__writer { font-size: 1rem; font-weight: bold; color: #444; }
@@ -33,46 +33,46 @@ class BookforumMagazine(BasicNewsRecipe):
display: block; max-width: 100%; height: auto;
}
.blog-article__caption { font-size: 0.8rem; display: block; margin-top: 0.2rem; }
- """
+ '''
def preprocess_html(self, soup):
# strip away links that's not needed
- for ele in soup.select(".blog-article__header a"):
+ for ele in soup.select('.blog-article__header a'):
ele.unwrap()
return soup
def parse_index(self):
soup = self.index_to_soup(
- _issue_url if _issue_url else "https://www.bookforum.com/print"
+ _issue_url if _issue_url else 'https://www.bookforum.com/print'
)
- meta_ele = soup.find("meta", property="og:title")
+ meta_ele = soup.find('meta', property='og:title')
if meta_ele:
self.timefmt = f' [{meta_ele["content"]}]'
- cover_ele = soup.find("img", class_="toc-issue__cover")
+ cover_ele = soup.find('img', class_='toc-issue__cover')
if cover_ele:
self.cover_url = urljoin(
- "https://www.bookforum.com",
- soup.find("img", class_="toc-issue__cover")["src"],
+ 'https://www.bookforum.com',
+ soup.find('img', class_='toc-issue__cover')['src'],
)
articles = {}
- for sect_ele in soup.find_all("div", class_="toc-articles__section"):
+ for sect_ele in soup.find_all('div', class_='toc-articles__section'):
section_name = self.tag_to_string(
- sect_ele.find("a", class_="toc__anchor-links__link")
+ sect_ele.find('a', class_='toc__anchor-links__link')
)
- for article_ele in sect_ele.find_all("article"):
- title_ele = article_ele.find("h1")
- sub_title_ele = article_ele.find(class_="toc-article__subtitle")
+ for article_ele in sect_ele.find_all('article'):
+ title_ele = article_ele.find('h1')
+ sub_title_ele = article_ele.find(class_='toc-article__subtitle')
articles.setdefault(section_name, []).append(
{
- "title": self.tag_to_string(title_ele),
- "url": article_ele.find("a", class_="toc-article__link")[
- "href"
+ 'title': self.tag_to_string(title_ele),
+ 'url': article_ele.find('a', class_='toc-article__link')[
+ 'href'
],
- "description": self.tag_to_string(sub_title_ele)
+ 'description': self.tag_to_string(sub_title_ele)
if sub_title_ele
- else "",
+ else '',
}
)
return articles.items()
diff --git a/recipes/borsen_dk.recipe b/recipes/borsen_dk.recipe
index 3a4e47f345..5414105b39 100644
--- a/recipes/borsen_dk.recipe
+++ b/recipes/borsen_dk.recipe
@@ -22,9 +22,9 @@ class Borsen_dk(BasicNewsRecipe):
language = 'da'
keep_only_tags = [
- dict(name="h1", attrs={'itemprop': 'headline'}),
- dict(name="div", attrs={'itemprob': 'datePublished'}),
- dict(name="div", attrs={'itemprop': 'articleBody'}),
+ dict(name='h1', attrs={'itemprop': 'headline'}),
+ dict(name='div', attrs={'itemprop': 'datePublished'}),
+ dict(name='div', attrs={'itemprop': 'articleBody'}),
]
# Feed are found here:
diff --git a/recipes/boston.com.recipe b/recipes/boston.com.recipe
index 82f75c53d4..e4cbd3cd2f 100644
--- a/recipes/boston.com.recipe
+++ b/recipes/boston.com.recipe
@@ -42,24 +42,24 @@ def class_startswith(*prefixes):
# From: https://www3.bostonglobe.com/lifestyle/comics?arc404=true
comics_to_fetch = {
- "ADAM@HOME": 'ad',
- "ARLO & JANIS": 'aj',
+ 'ADAM@HOME': 'ad',
+ 'ARLO & JANIS': 'aj',
# "CUL DE SAC": 'cds',
# "CURTIS": 'kfcrt',
- "DILBERT": 'dt',
- "DOONESBURY": 'db',
- "DUSTIN": 'kfdus',
- "F MINUS": 'fm',
- "FOR BETTER OR WORSE": 'fb',
+ 'DILBERT': 'dt',
+ 'DOONESBURY': 'db',
+ 'DUSTIN': 'kfdus',
+ 'F MINUS': 'fm',
+ 'FOR BETTER OR WORSE': 'fb',
# "GET FUZZY": 'gz',
# "MOTHER GOOSE & GRIMM": 'tmmgg',
# "JUMPSTART": 'jt',
- "MONTY": 'mt',
+ 'MONTY': 'mt',
# "POOCH CAFE",
- "RHYMES WITH ORANGE": 'kfrwo',
+ 'RHYMES WITH ORANGE': 'kfrwo',
# "ROSE IS ROSE": 'rr',
# "ZIPPY THE PINHEAD": 'kfzpy',
- "ZITS": 'kfzt'
+ 'ZITS': 'kfzt'
}
@@ -77,10 +77,10 @@ def extract_json(raw_html):
def absolutize_url(url):
- if url.startswith("//"):
- return "https:" + url
+ if url.startswith('//'):
+ return 'https:' + url
if url.startswith('/'):
- url = "https://www.bostonglobe.com" + url
+ url = 'https://www.bostonglobe.com' + url
return url
@@ -120,7 +120,7 @@ def main():
class BostonGlobeSubscription(BasicNewsRecipe):
- title = "Boston Globe"
+ title = 'Boston Globe'
__author__ = 'Kovid Goyal'
description = 'The Boston Globe'
language = 'en_US'
diff --git a/recipes/boston_globe_print_edition.recipe b/recipes/boston_globe_print_edition.recipe
index 8c5e81df7c..2a596c4126 100644
--- a/recipes/boston_globe_print_edition.recipe
+++ b/recipes/boston_globe_print_edition.recipe
@@ -25,17 +25,17 @@ def class_startswith(*prefixes):
return dict(attrs={'class': q})
def absolutize_url(url):
- if url.startswith("//"):
- return "https:" + url
+ if url.startswith('//'):
+ return 'https:' + url
if url.startswith('/'):
- url = "https://www.bostonglobe.com" + url
+ url = 'https://www.bostonglobe.com' + url
return url
class BostonGlobePrint(BasicNewsRecipe):
- title = "Boston Globe | Print Edition"
+ title = 'Boston Globe | Print Edition'
__author__ = 'Kovid Goyal, unkn0wn'
- description = 'The Boston Globe - Today\'s Paper'
+ description = "The Boston Globe - Today's Paper"
language = 'en_US'
keep_only_tags = [
@@ -70,7 +70,7 @@ class BostonGlobePrint(BasicNewsRecipe):
for image in soup.findAll('img', src=True):
if image['src'].endswith('750.jpg'):
return 'https:' + image['src']
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
@@ -94,7 +94,7 @@ class BostonGlobePrint(BasicNewsRecipe):
desc = self.tag_to_string(d)
self.log(section, '\n\t', title, '\n\t', desc, '\n\t\t', url)
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
return [(section, articles) for section, articles in feeds_dict.items()]
def preprocess_raw_html(self, raw_html, url):
diff --git a/recipes/brewiarz.recipe b/recipes/brewiarz.recipe
index ada68ebd17..4e9552ba51 100644
--- a/recipes/brewiarz.recipe
+++ b/recipes/brewiarz.recipe
@@ -23,40 +23,40 @@ class brewiarz(BasicNewsRecipe):
next_days = 1
def parse_index(self):
- dec2rom_dict = {"01": "i", "02": "ii", "03": "iii", "04": "iv",
- "05": "v", "06": "vi", "07": "vii", "08": "viii",
- "09": "ix", "10": "x", "11": "xi", "12": "xii"}
+ dec2rom_dict = {'01': 'i', '02': 'ii', '03': 'iii', '04': 'iv',
+ '05': 'v', '06': 'vi', '07': 'vii', '08': 'viii',
+ '09': 'ix', '10': 'x', '11': 'xi', '12': 'xii'}
- weekday_dict = {"Sunday": "Niedziela", "Monday": "Poniedziałek", "Tuesday": "Wtorek",
- "Wednesday": "Środa", "Thursday": "Czwartek", "Friday": "Piątek", "Saturday": "Sobota"}
+ weekday_dict = {'Sunday': 'Niedziela', 'Monday': 'Poniedziałek', 'Tuesday': 'Wtorek',
+ 'Wednesday': 'Środa', 'Thursday': 'Czwartek', 'Friday': 'Piątek', 'Saturday': 'Sobota'}
now = datetime.datetime.now()
feeds = []
for i in range(0, self.next_days):
url_date = now + datetime.timedelta(days=i)
- url_date_month = url_date.strftime("%m")
+ url_date_month = url_date.strftime('%m')
url_date_month_roman = dec2rom_dict[url_date_month]
- url_date_day = url_date.strftime("%d")
- url_date_year = url_date.strftime("%Y")[2:]
- url_date_weekday = url_date.strftime("%A")
+ url_date_day = url_date.strftime('%d')
+ url_date_year = url_date.strftime('%Y')[2:]
+ url_date_weekday = url_date.strftime('%A')
url_date_weekday_pl = weekday_dict[url_date_weekday]
- url = "http://brewiarz.pl/" + url_date_month_roman + "_" + \
- url_date_year + "/" + url_date_day + url_date_month + "/index.php3"
+ url = 'http://brewiarz.pl/' + url_date_month_roman + '_' + \
+ url_date_year + '/' + url_date_day + url_date_month + '/index.php3'
articles = self.parse_pages(url)
if articles:
- title = url_date_weekday_pl + " " + url_date_day + \
- "." + url_date_month + "." + url_date_year
+ title = url_date_weekday_pl + ' ' + url_date_day + \
+ '.' + url_date_month + '.' + url_date_year
feeds.append((title, articles))
else:
sectors = self.get_sectors(url)
for subpage in sectors:
- title = url_date_weekday_pl + " " + url_date_day + "." + \
- url_date_month + "." + url_date_year + " - " + subpage.string
- url = "http://brewiarz.pl/" + url_date_month_roman + "_" + url_date_year + \
- "/" + url_date_day + url_date_month + \
- "/" + subpage['href']
+ title = url_date_weekday_pl + ' ' + url_date_day + '.' + \
+ url_date_month + '.' + url_date_year + ' - ' + subpage.string
+ url = 'http://brewiarz.pl/' + url_date_month_roman + '_' + url_date_year + \
+ '/' + url_date_day + url_date_month + \
+ '/' + subpage['href']
print(url)
articles = self.parse_pages(url)
if articles:
@@ -91,7 +91,7 @@ class brewiarz(BasicNewsRecipe):
sublinks = ol.findAll(name='a')
for sublink in sublinks:
link_title = self.tag_to_string(
- link) + " - " + self.tag_to_string(sublink)
+ link) + ' - ' + self.tag_to_string(sublink)
link_url_print = re.sub(
'php3', 'php3?kr=_druk&wr=lg&', sublink['href'])
link_url = url[:-10] + link_url_print
@@ -145,7 +145,7 @@ class brewiarz(BasicNewsRecipe):
if x == tag:
break
else:
- print("Can't find", tag, "in", tag.parent)
+ print("Can't find", tag, 'in', tag.parent)
continue
for r in reversed(tag.contents):
tag.parent.insert(i, r)
diff --git a/recipes/business_insider.recipe b/recipes/business_insider.recipe
index d04913ea17..e08b84b1e0 100644
--- a/recipes/business_insider.recipe
+++ b/recipes/business_insider.recipe
@@ -22,10 +22,10 @@ class Business_insider(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newsportal'
masthead_url = 'http://static.businessinsider.com/assets/images/logos/tbi_print.jpg'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif }
img{margin-bottom: 0.4em; display:block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/business_standard_print.recipe b/recipes/business_standard_print.recipe
index f4dc6d6de1..eafe84282e 100644
--- a/recipes/business_standard_print.recipe
+++ b/recipes/business_standard_print.recipe
@@ -64,7 +64,7 @@ class BusinessStandardPrint(BasicNewsRecipe):
if dt.weekday() == 6:
self.log.warn(
'Business Standard Does Not Have A Print Publication On Sunday. The Reports'
- ' And Columns On This Page Today Appeared In The Newspaper\'s Saturday Edition.'
+ " And Columns On This Page Today Appeared In The Newspaper's Saturday Edition."
)
url = 'https://apibs.business-standard.com/category/today-paper?sortBy=' + today
raw = self.index_to_soup(url, raw=True)
diff --git a/recipes/business_today.recipe b/recipes/business_today.recipe
index b8188de06a..4db915ee96 100644
--- a/recipes/business_today.recipe
+++ b/recipes/business_today.recipe
@@ -90,7 +90,7 @@ class BT(BasicNewsRecipe):
# Insert feeds in specified order, if available
- feedSort = ['Editor\'s Note', 'Editors note']
+ feedSort = ["Editor's Note", 'Editors note']
for i in feedSort:
if i in sections:
feeds.append((i, sections[i]))
diff --git a/recipes/cacm.recipe b/recipes/cacm.recipe
index aee3c68eca..201312128a 100644
--- a/recipes/cacm.recipe
+++ b/recipes/cacm.recipe
@@ -5,8 +5,8 @@ from calibre.web.feeds.news import BasicNewsRecipe
class CACM(BasicNewsRecipe):
- title = "ACM CACM Magazine"
- description = "Published on day 1 of every month."
+ title = 'ACM CACM Magazine'
+ description = 'Published on day 1 of every month.'
language = 'en'
oldest_article = 30
max_articles_per_feed = 100
@@ -17,16 +17,16 @@ class CACM(BasicNewsRecipe):
]
def get_cover_url(self):
- """
+ '''
Parse out cover URL from cover page.
Example:
From: https://cacm.acm.org/system/assets/0004/2570/April2022.Cover.1000x1338.large.jpg?1647524668&1647524668
Get: https://cacm.acm.org/system/assets/0004/2570/April2022.Cover.1000x1338.jpg
- """
+ '''
- soup = self.index_to_soup("https://cacm.acm.org/")
- a_img = soup.find("a", class_="menuCover")
- img_url = a_img.img["src"]
- img_url = img_url.split("?")[0]
- img_url = img_url.replace(".large", "")
+ soup = self.index_to_soup('https://cacm.acm.org/')
+ a_img = soup.find('a', class_='menuCover')
+ img_url = a_img.img['src']
+ img_url = img_url.split('?')[0]
+ img_url = img_url.replace('.large', '')
return img_url
diff --git a/recipes/calcalist.recipe b/recipes/calcalist.recipe
index 9d954f494a..933eddaf78 100644
--- a/recipes/calcalist.recipe
+++ b/recipes/calcalist.recipe
@@ -29,28 +29,28 @@ class AdvancedUserRecipe1283848012(BasicNewsRecipe):
]
feeds = [
- (u" דף הבית", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-8,00.xml"),
- (u" 24/7", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3674,00.xml"),
- (u" באזז", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3673,00.xml"),
- (u" משפט", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3772,00.xml"),
- (u" רכב", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3783,00.xml"),
- (u" אחריות וסביבה", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3781,00.xml"),
- (u" דעות", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3791,00.xml"),
- (u" תיירות ותעופה", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3784,00.xml"),
- (u" קריירה", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3782,00.xml"),
- (u" אחד העם", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3768,00.xml"),
- (u" המלצות ואזהרות", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3771,00.xml"),
- (u" הייטק והון סיכון", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3928,00.xml"),
- (u" חדשות טכנולוגיה", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3778,00.xml"),
- (u" תקשורת", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-4471,00.xml"),
- (u" אינטרנט", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3773,00.xml"),
- (u" מכשירים וגאדג'טים", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3777,00.xml"),
- (u" המדריך", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3880,00.xml"),
- (u" אפליקציות", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3998,00.xml"),
- (u" Play", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3792,00.xml"),
- (u" הכסף", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-9,00.xml"),
- (u" עולם", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-13,00.xml"),
- (u" פרסום ושיווק", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-5,00.xml"),
- (u" פנאי", u"http://www.calcalist.co.il/GeneralRSS/0,16335,L-3,00.xml"),
- (u" עסקי ספורט", u"http://WallaNewsw.calcalist.co.il/GeneralRSS/0,16335,L-18,00.xml")
+ (u' דף הבית', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-8,00.xml'),
+ (u' 24/7', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3674,00.xml'),
+ (u' באזז', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3673,00.xml'),
+ (u' משפט', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3772,00.xml'),
+ (u' רכב', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3783,00.xml'),
+ (u' אחריות וסביבה', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3781,00.xml'),
+ (u' דעות', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3791,00.xml'),
+ (u' תיירות ותעופה', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3784,00.xml'),
+ (u' קריירה', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3782,00.xml'),
+ (u' אחד העם', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3768,00.xml'),
+ (u' המלצות ואזהרות', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3771,00.xml'),
+ (u' הייטק והון סיכון', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3928,00.xml'),
+ (u' חדשות טכנולוגיה', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3778,00.xml'),
+ (u' תקשורת', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-4471,00.xml'),
+ (u' אינטרנט', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3773,00.xml'),
+ (u" מכשירים וגאדג'טים", u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3777,00.xml'),
+ (u' המדריך', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3880,00.xml'),
+ (u' אפליקציות', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3998,00.xml'),
+ (u' Play', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3792,00.xml'),
+ (u' הכסף', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-9,00.xml'),
+ (u' עולם', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-13,00.xml'),
+ (u' פרסום ושיווק', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-5,00.xml'),
+ (u' פנאי', u'http://www.calcalist.co.il/GeneralRSS/0,16335,L-3,00.xml'),
+ (u' עסקי ספורט', u'http://WallaNewsw.calcalist.co.il/GeneralRSS/0,16335,L-18,00.xml')
]
diff --git a/recipes/calgary_herald.recipe b/recipes/calgary_herald.recipe
index 5ab1c722fa..e74af9365a 100644
--- a/recipes/calgary_herald.recipe
+++ b/recipes/calgary_herald.recipe
@@ -164,24 +164,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -262,10 +262,10 @@ class CanWestPaper(BasicNewsRecipe):
if url.startswith('/'):
url = self.url_prefix + url
if not url.startswith(self.url_prefix):
- print("Rejected " + url)
+ print('Rejected ' + url)
return
if url in self.url_list:
- print("Rejected dup " + url)
+ print('Rejected dup ' + url)
return
self.url_list.append(url)
title = self.tag_to_string(atag, False)
@@ -277,8 +277,8 @@ class CanWestPaper(BasicNewsRecipe):
return
dtag = adiv.find('div', 'content')
description = ''
- print("URL " + url)
- print("TITLE " + title)
+ print('URL ' + url)
+ print('TITLE ' + title)
if dtag is not None:
stag = dtag.span
if stag is not None:
@@ -286,18 +286,18 @@ class CanWestPaper(BasicNewsRecipe):
description = self.tag_to_string(stag, False)
else:
description = self.tag_to_string(dtag, False)
- print("DESCRIPTION: " + description)
+ print('DESCRIPTION: ' + description)
if key not in articles:
articles[key] = []
articles[key].append(dict(
title=title, url=url, date='', description=description, author='', content=''))
def parse_web_index(key, keyurl):
- print("Section: " + key + ': ' + self.url_prefix + keyurl)
+ print('Section: ' + key + ': ' + self.url_prefix + keyurl)
try:
soup = self.index_to_soup(self.url_prefix + keyurl)
except:
- print("Section: " + key + ' NOT FOUND')
+ print('Section: ' + key + ' NOT FOUND')
return
ans.append(key)
mainsoup = soup.find('div', 'bodywrapper')
diff --git a/recipes/capital_gr.recipe b/recipes/capital_gr.recipe
index 80bfafb953..848a24f41b 100644
--- a/recipes/capital_gr.recipe
+++ b/recipes/capital_gr.recipe
@@ -17,7 +17,7 @@ class Capital(BasicNewsRecipe):
keep_only_tags = [
dict(name='h1'),
dict(name='p'),
- dict(name='span', attrs={'id': ["textbody"]})
+ dict(name='span', attrs={'id': ['textbody']})
]
# 3 posts seemed to have utf8 encoding
diff --git a/recipes/caravan_magazine.recipe b/recipes/caravan_magazine.recipe
index 115424802d..7a3630e2fd 100644
--- a/recipes/caravan_magazine.recipe
+++ b/recipes/caravan_magazine.recipe
@@ -96,7 +96,7 @@ class CaravanMagazine(BasicNewsRecipe):
br = BasicNewsRecipe.get_browser(self, *args, **kw)
if not self.username or not self.password:
return br
- data = json.dumps({"0":{"json":{"email":self.username,"password":self.password}}})
+ data = json.dumps({'0':{'json':{'email':self.username,'password':self.password}}})
if not isinstance(data, bytes):
data = data.encode('utf-8')
rq = Request(
@@ -138,7 +138,7 @@ class CaravanMagazine(BasicNewsRecipe):
d = self.recipe_specific_options.get('date')
if d and isinstance(d, str):
x = d.split('-')
- inp = json.dumps({"0":{"json":{"month":int(x[0]),"year":int(x[1])}}})
+ inp = json.dumps({'0':{'json':{'month':int(x[0]),'year':int(x[1])}}})
api = 'https://api.caravanmagazine.in/api/trpc/magazines.getForMonthAndYear?batch=1&input=' + quote(inp, safe='')
raw = json.loads(self.index_to_soup(api, raw=True))
@@ -174,7 +174,7 @@ class CaravanMagazine(BasicNewsRecipe):
def print_version(self, url):
slug = urlparse(url).path
- inp = json.dumps({"0":{"json":{"slug":slug}}})
+ inp = json.dumps({'0':{'json':{'slug':slug}}})
return 'https://api.caravanmagazine.in/api/trpc/articles.getFromCache?batch=1&input=' + quote(inp, safe='')
def preprocess_raw_html(self, raw, url):
diff --git a/recipes/cato.recipe b/recipes/cato.recipe
index c2d7332f17..c1a22df772 100644
--- a/recipes/cato.recipe
+++ b/recipes/cato.recipe
@@ -5,9 +5,9 @@ from calibre.web.feeds.news import BasicNewsRecipe
class CATOInstitute(BasicNewsRecipe):
title = u'The CATO Institute'
- description = "The Cato Institute is a public policy research organization — a think tank — \
+ description = 'The Cato Institute is a public policy research organization — a think tank — \
dedicated to the principles of individual liberty, limited government, free markets and peace.\
- Its scholars and analysts conduct independent, nonpartisan research on a wide range of policy issues."
+ Its scholars and analysts conduct independent, nonpartisan research on a wide range of policy issues.'
__author__ = '_reader'
__date__ = '05 July 2012'
__version__ = '1.0'
diff --git a/recipes/chr_mon.recipe b/recipes/chr_mon.recipe
index 0bf344b2b3..02a91ddc1f 100644
--- a/recipes/chr_mon.recipe
+++ b/recipes/chr_mon.recipe
@@ -24,7 +24,7 @@ class CSMonitor(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newspaper'
masthead_url = 'http://www.csmonitor.com/extension/csm_base/design/csm_design/images/csmlogo_179x46.gif'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Tahoma,Verdana,Helvetica,sans-serif }
img{margin-bottom: 0.4em; display:block}
.head {font-family: Georgia,"Times New Roman",Times,serif}
@@ -32,7 +32,7 @@ class CSMonitor(BasicNewsRecipe):
.hide{display: none}
.sLoc{font-weight: bold}
ul{list-style-type: none}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/chronicle_higher_ed.recipe b/recipes/chronicle_higher_ed.recipe
index 619dbb287c..84087d3e44 100644
--- a/recipes/chronicle_higher_ed.recipe
+++ b/recipes/chronicle_higher_ed.recipe
@@ -39,7 +39,7 @@ class Chronicle(BasicNewsRecipe):
# Go to the issue
soup0 = self.index_to_soup('http://chronicle.com/section/Archives/39/')
issue = soup0.find('ul', attrs={'class': 'feature-promo-list'}).li
- issueurl = "http://chronicle.com" + issue.a['href']
+ issueurl = 'http://chronicle.com' + issue.a['href']
# Find date
dates = self.tag_to_string(issue.a).split(': ')[-1]
@@ -47,12 +47,12 @@ class Chronicle(BasicNewsRecipe):
# Find cover
cover = soup0.find('div', attrs={
- 'class': 'side-content'}).find(attrs={'src': re.compile("photos/biz/Current")})
+ 'class': 'side-content'}).find(attrs={'src': re.compile('photos/biz/Current')})
if cover is not None:
- if "chronicle.com" in cover['src']:
+ if 'chronicle.com' in cover['src']:
self.cover_url = cover['src']
else:
- self.cover_url = "http://chronicle.com" + cover['src']
+ self.cover_url = 'http://chronicle.com' + cover['src']
# Go to the main body
soup = self.index_to_soup(issueurl)
div = soup.find('div', attrs={'id': 'article-body'})
@@ -64,7 +64,7 @@ class Chronicle(BasicNewsRecipe):
a = post.find('a', href=True)
if a is not None:
title = self.tag_to_string(a)
- url = "http://chronicle.com" + a['href'].strip()
+ url = 'http://chronicle.com' + a['href'].strip()
sectiontitle = post.findPrevious('h3')
if sectiontitle is None:
sectiontitle = post.findPrevious('h4')
diff --git a/recipes/cicero.recipe b/recipes/cicero.recipe
index 4a4acd2507..94d713820b 100644
--- a/recipes/cicero.recipe
+++ b/recipes/cicero.recipe
@@ -18,24 +18,24 @@ class BasicUserRecipe1316245412(BasicNewsRecipe):
# remove_javascript = True
remove_tags = [
- dict(name='div', attrs={'id': ["header", "navigation", "skip-link",
- "header-print", "header-print-url", "meta-toolbar", "footer"]}),
- dict(name='div', attrs={'class': ["region region-sidebar-first column sidebar", "breadcrumb",
- "breadcrumb-title", "meta", "comment-wrapper",
- "field field-name-field-show-teaser-right field-type-list-boolean field-label-above",
- "page-header",
- "view view-alle-karikaturen view-id-alle_karikaturen view-display-id-default view-dom-id-1",
- "pagination",
- "view view-letzte-videos view-id-letzte_videos view-display-id-default view-dom-id-1",
- "view view-letzte-videos view-id-letzte_videos view-display-id-default view-dom-id-2", # 2011-09-23
- "view view-alle-karikaturen view-id-alle_karikaturen view-display-id-default view-dom-id-2", # 2011-09-23
+ dict(name='div', attrs={'id': ['header', 'navigation', 'skip-link',
+ 'header-print', 'header-print-url', 'meta-toolbar', 'footer']}),
+ dict(name='div', attrs={'class': ['region region-sidebar-first column sidebar', 'breadcrumb',
+ 'breadcrumb-title', 'meta', 'comment-wrapper',
+ 'field field-name-field-show-teaser-right field-type-list-boolean field-label-above',
+ 'page-header',
+ 'view view-alle-karikaturen view-id-alle_karikaturen view-display-id-default view-dom-id-1',
+ 'pagination',
+ 'view view-letzte-videos view-id-letzte_videos view-display-id-default view-dom-id-1',
+ 'view view-letzte-videos view-id-letzte_videos view-display-id-default view-dom-id-2', # 2011-09-23
+ 'view view-alle-karikaturen view-id-alle_karikaturen view-display-id-default view-dom-id-2', # 2011-09-23
]}),
- dict(name='div', attrs={'title': ["Dossier Auswahl"]}),
- dict(name='h2', attrs={'class': ["title comment-form"]}),
+ dict(name='div', attrs={'title': ['Dossier Auswahl']}),
+ dict(name='h2', attrs={'class': ['title comment-form']}),
dict(name='form', attrs={
- 'class': ["comment-form user-info-from-cookie"]}),
+ 'class': ['comment-form user-info-from-cookie']}),
dict(name='table', attrs={
- 'class': ["mcx-social-horizontal", "page-header"]}),
+ 'class': ['mcx-social-horizontal', 'page-header']}),
]
feeds = [
diff --git a/recipes/cincinnati_enquirer.recipe b/recipes/cincinnati_enquirer.recipe
index 4e2409024c..2fcad1c635 100644
--- a/recipes/cincinnati_enquirer.recipe
+++ b/recipes/cincinnati_enquirer.recipe
@@ -34,7 +34,7 @@ class AdvancedUserRecipe1234144423(BasicNewsRecipe):
dict(name='div', attrs={'class': ['padding', 'sidebar-photo', 'blog caitlin']})]
remove_tags = [
- dict(name=['object', 'link', 'table', 'embed']), dict(name='div', attrs={'id': ["pluckcomments", "StoryChat"]}), dict(
+ dict(name=['object', 'link', 'table', 'embed']), dict(name='div', attrs={'id': ['pluckcomments', 'StoryChat']}), dict(
name='div', attrs={'class': ['articleflex-container', ]}), dict(name='p', attrs={'class': ['posted', 'tags']})
]
diff --git a/recipes/ciperchile.recipe b/recipes/ciperchile.recipe
index 9fb90d6fbd..c503b218bc 100644
--- a/recipes/ciperchile.recipe
+++ b/recipes/ciperchile.recipe
@@ -23,14 +23,14 @@ class CiperChile(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'blog'
masthead_url = 'http://ciperchile.cl/wp-content/themes/cipertheme/css/ui/ciper-logo.png'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,sans-serif}
.excerpt{font-family: Georgia,"Times New Roman",Times,serif; font-style: italic; font-size: 1.25em}
.author{font-family: Georgia,"Times New Roman",Times,serif; font-style: italic; font-size: small}
.date{font-family: Georgia,"Times New Roman",Times,serif; font-size: small; color: grey}
.epigrafe{font-size: small; color: grey}
img{margin-bottom: 0.4em; display:block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/clarin.recipe b/recipes/clarin.recipe
index da37fd8ad7..4138eca284 100644
--- a/recipes/clarin.recipe
+++ b/recipes/clarin.recipe
@@ -44,7 +44,7 @@ class Clarin(BasicNewsRecipe):
# To get all the data (images)
auto_cleanup = False
- extra_css = """
+ extra_css = '''
h1#title {
line-height: 1em;
margin: 0 0 .5em 0;
@@ -64,7 +64,7 @@ class Clarin(BasicNewsRecipe):
font-size: .9em;
margin-bottom: .5em;
}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/cnetjapan.recipe b/recipes/cnetjapan.recipe
index e1e4d79827..6dbcb927e5 100644
--- a/recipes/cnetjapan.recipe
+++ b/recipes/cnetjapan.recipe
@@ -25,16 +25,16 @@ class CNetJapan(BasicNewsRecipe):
lambda match: ''),
]
- remove_tags_before = dict(id="contents_l")
+ remove_tags_before = dict(id='contents_l')
remove_tags = [
- {'class': "social_bkm_share"},
- {'class': "social_bkm_print"},
- {'class': "block20 clearfix"},
- dict(name="div", attrs={'id': 'bookreview'}),
- {'class': "tag_left_ttl"},
- {'class': "tag_right"}
+ {'class': 'social_bkm_share'},
+ {'class': 'social_bkm_print'},
+ {'class': 'block20 clearfix'},
+ dict(name='div', attrs={'id': 'bookreview'}),
+ {'class': 'tag_left_ttl'},
+ {'class': 'tag_right'}
]
- remove_tags_after = {'class': "block20"}
+ remove_tags_after = {'class': 'block20'}
def parse_feeds(self):
diff --git a/recipes/cnetjapan_digital.recipe b/recipes/cnetjapan_digital.recipe
index 9cb2a148b4..db10032de9 100644
--- a/recipes/cnetjapan_digital.recipe
+++ b/recipes/cnetjapan_digital.recipe
@@ -25,16 +25,16 @@ class CNetJapanDigital(BasicNewsRecipe):
lambda match: ''),
]
- remove_tags_before = dict(id="contents_l")
+ remove_tags_before = dict(id='contents_l')
remove_tags = [
- {'class': "social_bkm_share"},
- {'class': "social_bkm_print"},
- {'class': "block20 clearfix"},
- dict(name="div", attrs={'id': 'bookreview'}),
- {'class': "tag_left_ttl"},
- {'class': "tag_right"}
+ {'class': 'social_bkm_share'},
+ {'class': 'social_bkm_print'},
+ {'class': 'block20 clearfix'},
+ dict(name='div', attrs={'id': 'bookreview'}),
+ {'class': 'tag_left_ttl'},
+ {'class': 'tag_right'}
]
- remove_tags_after = {'class': "block20"}
+ remove_tags_after = {'class': 'block20'}
def parse_feeds(self):
diff --git a/recipes/cnetjapan_release.recipe b/recipes/cnetjapan_release.recipe
index 4b85d24b9b..1cf29aef02 100644
--- a/recipes/cnetjapan_release.recipe
+++ b/recipes/cnetjapan_release.recipe
@@ -25,15 +25,15 @@ class CNetJapanRelease(BasicNewsRecipe):
lambda match: ''),
]
- remove_tags_before = dict(id="contents_l")
+ remove_tags_before = dict(id='contents_l')
remove_tags = [
- {'class': "social_bkm_share"},
- {'class': "social_bkm_print"},
- {'class': "block20 clearfix"},
- dict(name="div", attrs={'id': 'bookreview'}),
- {'class': "tag_left_ttl"}
+ {'class': 'social_bkm_share'},
+ {'class': 'social_bkm_print'},
+ {'class': 'block20 clearfix'},
+ dict(name='div', attrs={'id': 'bookreview'}),
+ {'class': 'tag_left_ttl'}
]
- remove_tags_after = {'class': "block20"}
+ remove_tags_after = {'class': 'block20'}
def parse_feeds(self):
diff --git a/recipes/cnetnews.recipe b/recipes/cnetnews.recipe
index aefb2fda96..a98034ca21 100644
--- a/recipes/cnetnews.recipe
+++ b/recipes/cnetnews.recipe
@@ -56,7 +56,7 @@ class CnetNews(BasicNewsRecipe):
keep_only_tags = [
dict(name='h1'),
dict(section='author'),
- dict(id=["article-body", 'cnetReview']),
+ dict(id=['article-body', 'cnetReview']),
dict(attrs={'class': 'deal-content'}),
]
diff --git a/recipes/cnn.recipe b/recipes/cnn.recipe
index 9089c9d2bb..b8a80131cc 100644
--- a/recipes/cnn.recipe
+++ b/recipes/cnn.recipe
@@ -72,7 +72,7 @@ class CNN(BasicNewsRecipe):
try:
br.open(masthead)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
masthead = None
return masthead
diff --git a/recipes/contretemps.recipe b/recipes/contretemps.recipe
index fb72855ba0..8c96314279 100644
--- a/recipes/contretemps.recipe
+++ b/recipes/contretemps.recipe
@@ -36,9 +36,9 @@ class ContretempsRecipe(BasicNewsRecipe):
return None
def default_cover(self, cover_file):
- """
+ '''
Crée une couverture personnalisée pour Contretemps
- """
+ '''
from qt.core import QColor, QFont, QImage, QPainter, QPen, QRect, Qt
from calibre.gui2 import ensure_app, load_builtin_fonts, pixmap_to_data
@@ -56,7 +56,7 @@ class ContretempsRecipe(BasicNewsRecipe):
weekday = french_weekday[wkd]
month = french_month[today.month]
- date_str = f"{weekday} {today.day} {month} {today.year}"
+ date_str = f'{weekday} {today.day} {month} {today.year}'
edition = today.strftime('Édition de %Hh%M')
# Création de l'image de base (ratio ~1.6 pour format livre)
diff --git a/recipes/cosmos.recipe b/recipes/cosmos.recipe
index b973c027bd..0100149ed0 100644
--- a/recipes/cosmos.recipe
+++ b/recipes/cosmos.recipe
@@ -5,10 +5,10 @@ from calibre.web.feeds.news import BasicNewsRecipe
class CosmosMagazine(BasicNewsRecipe):
- title = "Cosmos Magazine"
+ title = 'Cosmos Magazine'
description = (
- "Cosmos is a quarterly science magazine with 4 editions a year (Mar, Jun, Sep, Dec)."
- "It is produced by The Royal Institution of Australia Inc (RiAus)."
+ 'Cosmos is a quarterly science magazine with 4 editions a year (Mar, Jun, Sep, Dec).'
+ ' It is produced by The Royal Institution of Australia Inc (RiAus).'
)
language = 'en_AU'
__author__ = 'yodha8'
diff --git a/recipes/courrierinternational.recipe b/recipes/courrierinternational.recipe
index 5aa05d55dc..193783171b 100644
--- a/recipes/courrierinternational.recipe
+++ b/recipes/courrierinternational.recipe
@@ -70,12 +70,12 @@ class CourrierInternational(BasicNewsRecipe):
}
'''
- needs_subscription = "optional"
+ needs_subscription = 'optional'
login_url = 'http://www.courrierinternational.com/login'
def get_browser(self):
def is_form_login(form):
- return "id" in form.attrs and form.attrs['id'] == "user-login-form"
+ return 'id' in form.attrs and form.attrs['id'] == 'user-login-form'
br = BasicNewsRecipe.get_browser(self)
if self.username:
br.open(self.login_url)
@@ -86,8 +86,8 @@ class CourrierInternational(BasicNewsRecipe):
return br
def preprocess_html(self, soup):
- for link in soup.findAll("a", href=re.compile('^/')):
- link["href"] = 'http://www.courrierinternational.com' + link["href"]
+ for link in soup.findAll('a', href=re.compile('^/')):
+ link['href'] = 'http://www.courrierinternational.com' + link['href']
return soup
feeds = [
diff --git a/recipes/cubadebate.recipe b/recipes/cubadebate.recipe
index ea87b6688c..335245ca4e 100644
--- a/recipes/cubadebate.recipe
+++ b/recipes/cubadebate.recipe
@@ -21,10 +21,10 @@ class CubaDebate(BasicNewsRecipe):
encoding = 'utf-8'
masthead_url = 'http://www.cubadebate.cu/wp-content/themes/cubadebate/images/logo.gif'
publication_type = 'newsportal'
- extra_css = """
+ extra_css = '''
#BlogTitle{font-size: xx-large; font-weight: bold}
body{font-family: Verdana, Arial, Tahoma, sans-serif}
- """
+ '''
conversion_options = {
'comments': description, 'tags': category, 'language': language, 'publisher': publisher
diff --git a/recipes/dainik_bhaskar.recipe b/recipes/dainik_bhaskar.recipe
index d9389c234b..129fd0ce2e 100644
--- a/recipes/dainik_bhaskar.recipe
+++ b/recipes/dainik_bhaskar.recipe
@@ -23,7 +23,7 @@ class DainikBhaskar(BasicNewsRecipe):
soup = self.index_to_soup('https://epaper.bhaskar.com/')
tag = soup.find(attrs={'class': 'scaleDiv'})
if tag:
- self.cover_url = tag.find('img')['src'].replace("_ss.jpg", "_l.jpg")
+ self.cover_url = tag.find('img')['src'].replace('_ss.jpg', '_l.jpg')
return super().get_cover_url()
keep_only_tags = [
diff --git a/recipes/danas.recipe b/recipes/danas.recipe
index cd6e8c8c9a..92ef429ba0 100644
--- a/recipes/danas.recipe
+++ b/recipes/danas.recipe
@@ -31,11 +31,11 @@ class Danas(BasicNewsRecipe):
auto_cleanup = True
auto_cleanup_keep = '//div[@class="post-intro-above"] //h1[@class="post-title"] | //div[@class="post-intro-title"] | //div[@class="post-meta-wrapper"]'
resolve_internal_links = True
- extra_css = """
+ extra_css = '''
.author{font-size: small}
.published {font-size: small}
img{margin-bottom: 0.8em}
- """
+ '''
conversion_options = {
'comment': description,
@@ -66,7 +66,7 @@ class Danas(BasicNewsRecipe):
'avgust', 'septembar', 'oktobar', 'novembar', 'decembar']
td = date.today()
monthname = months[td.month - 1]
- lurl = td.strftime("https://www.danas.rs/naslovna/naslovna-strana-za-%d-" + monthname + "-%Y/")
+ lurl = td.strftime('https://www.danas.rs/naslovna/naslovna-strana-za-%d-' + monthname + '-%Y/')
soup = self.index_to_soup(lurl)
al = soup.find('div', attrs={'class':'corax-image'})
if al and al.img:
diff --git a/recipes/degentenaar.recipe b/recipes/degentenaar.recipe
index 13f93f778a..fd42704dad 100644
--- a/recipes/degentenaar.recipe
+++ b/recipes/degentenaar.recipe
@@ -77,9 +77,9 @@ class DeGentenaarOnline(BasicNewsRecipe):
soup.html['lang'] = self.lang
soup.html['dir'] = self.direction
mlang = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Language"), ("content", self.lang)])
+ ('http-equiv', 'Content-Language'), ('content', self.lang)])
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=utf-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=utf-8')])
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
return soup
diff --git a/recipes/democracy_journal.recipe b/recipes/democracy_journal.recipe
index 96c80c946c..965947d0df 100644
--- a/recipes/democracy_journal.recipe
+++ b/recipes/democracy_journal.recipe
@@ -16,8 +16,8 @@ class AdvancedUserRecipe1361743898(BasicNewsRecipe):
def parse_index(self):
articles = []
feeds = []
- soup = self.index_to_soup("http://www.democracyjournal.org")
- for x in soup.findAll(href=re.compile(r"http://www\.democracyjournal\.org/\d*/.*php$")):
+ soup = self.index_to_soup('http://www.democracyjournal.org')
+ for x in soup.findAll(href=re.compile(r'http://www\.democracyjournal\.org/\d*/.*php$')):
url = x.get('href')
title = self.tag_to_string(x)
articles.append({'title': title, 'url': url,
diff --git a/recipes/demorgen_be.recipe b/recipes/demorgen_be.recipe
index 98e77f97c3..8d7eab72a4 100644
--- a/recipes/demorgen_be.recipe
+++ b/recipes/demorgen_be.recipe
@@ -1,8 +1,8 @@
#!/usr/bin/env python
-"""
+'''
demorgen.be
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe
@@ -13,7 +13,7 @@ class DeMorganBe(BasicNewsRecipe):
description = 'News from Belgium in Dutch'
oldest_article = 1
language = 'nl_BE'
- encoding = "utf-8"
+ encoding = 'utf-8'
max_articles_per_feed = 100
no_stylesheets = True
remove_attributes = ['style', 'height', 'width']
@@ -23,10 +23,10 @@ class DeMorganBe(BasicNewsRecipe):
masthead_url = 'https://www.demorgen.be/_next/static/media/demorgen_logo.dce579e2.svg'
cover_url = 'https://usercontent.one/wp/www.insidejazz.be/wp-content/uploads/2018/11/pic0143.png'
- extra_css = """
+ extra_css = '''
time, [data-test-id:"article-label"], [data-test-id:"article-sublabel"], [[data-test-id:"article-author"]] { font-size:small; }
[data-test-id:"header-intro"] { font-style: italic; }
- """
+ '''
keep_only_tags = [
dict(name='article', attrs={'id': 'article-content'}),
diff --git a/recipes/denik.cz.recipe b/recipes/denik.cz.recipe
index 2af252fc9a..b856914ab8 100644
--- a/recipes/denik.cz.recipe
+++ b/recipes/denik.cz.recipe
@@ -23,8 +23,8 @@ class ceskyDenikRecipe(BasicNewsRecipe):
cover_url = 'http://g.denik.cz/images/loga/denik.png'
remove_javascript = True
no_stylesheets = True
- extra_css = """
- """
+ extra_css = '''
+ '''
remove_tags = []
keep_only_tags = [dict(name='div', attrs={'class': 'content'})]
diff --git a/recipes/denikn.cz.recipe b/recipes/denikn.cz.recipe
index 50c7b9fd3e..a857cb98a2 100644
--- a/recipes/denikn.cz.recipe
+++ b/recipes/denikn.cz.recipe
@@ -11,11 +11,11 @@ CZ_MONTHS = ['led', 'úno', 'bře', 'dub', 'kvě', 'čen', 'čec', 'srp', 'zář
def cz_title_time():
- """
+ '''
Helper function to return date with czech locale.
Uses hardcoded lookup table of day and month names as strftime requires
locale change that is not thread safe.
- """
+ '''
today = datetime.today()
weekday = CZ_DAYS[today.weekday()]
month = CZ_MONTHS[today.month-1]
@@ -26,9 +26,9 @@ def cz_title_time():
class DenikNRecipe(BasicNewsRecipe):
- """
+ '''
Recipe for the RSS feed of https://denikn.cz/
- """
+ '''
title = u'Deník N'
__author__ = 'Robert Mihaly'
diff --git a/recipes/deredactie.recipe b/recipes/deredactie.recipe
index 1f6a1e5316..8fe8229a29 100644
--- a/recipes/deredactie.recipe
+++ b/recipes/deredactie.recipe
@@ -31,13 +31,13 @@ class deredactie(BasicNewsRecipe):
catnames = {}
soup = self.index_to_soup(
'http://www.deredactie.be/cm/vrtnieuws.deutsch')
- for elem in soup.findAll('li', attrs={'id': re.compile("^navItem[2-9]")}):
+ for elem in soup.findAll('li', attrs={'id': re.compile('^navItem[2-9]')}):
a = elem.find('a', href=True)
m = re.search('(?<=/)[^/]*$', a['href'])
cat = str(m.group(0))
categories.append(cat)
catnames[cat] = a['title']
- self.log("found cat %s\n" % catnames[cat])
+ self.log('found cat %s\n' % catnames[cat])
feeds = []
@@ -45,7 +45,7 @@ class deredactie(BasicNewsRecipe):
articles = []
soup = self.index_to_soup(
'http://www.deredactie.be/cm/vrtnieuws.deutsch/' + cat)
- for a in soup.findAll('a', attrs={'href': re.compile("deutsch.*/[0-9][0-9][0-9][0-9][0-9][0-9]_")}):
+ for a in soup.findAll('a', attrs={'href': re.compile('deutsch.*/[0-9][0-9][0-9][0-9][0-9][0-9]_')}):
skip_this_article = False
url = a['href'].strip()
if url.startswith('/'):
@@ -55,12 +55,12 @@ class deredactie(BasicNewsRecipe):
for article in articles:
if article['url'] == url:
skip_this_article = True
- self.log("SKIPPING DUP %s" % url)
+ self.log('SKIPPING DUP %s' % url)
break
if skip_this_article:
continue
articles.append(myarticle)
- self.log("Adding URL %s\n" % url)
+ self.log('Adding URL %s\n' % url)
if articles:
feeds.append((catnames[cat], articles))
return feeds
diff --git a/recipes/dilema.recipe b/recipes/dilema.recipe
index 1a64701880..b528696b09 100644
--- a/recipes/dilema.recipe
+++ b/recipes/dilema.recipe
@@ -34,7 +34,7 @@ class Volkskrant(BasicNewsRecipe):
dict(id=['like', 'dlik']),
dict(name=['script', 'noscript', 'style']),
]
- remove_attributes = ["class", "id", "name", "style"]
+ remove_attributes = ['class', 'id', 'name', 'style']
encoding = 'utf-8'
no_stylesheets = True
ignore_duplicate_articles = {'url'}
@@ -88,7 +88,7 @@ class Volkskrant(BasicNewsRecipe):
)
)
- sections = [("Numărul curent", articles)]
+ sections = [('Numărul curent', articles)]
return sections
def preprocess_html(self, soup):
diff --git a/recipes/distrowatch_weekly.recipe b/recipes/distrowatch_weekly.recipe
index 836b8a3d01..03aa38ed22 100644
--- a/recipes/distrowatch_weekly.recipe
+++ b/recipes/distrowatch_weekly.recipe
@@ -1,8 +1,8 @@
#!/usr/bin/env python
-__license__ = "GPL v3"
+__license__ = 'GPL v3'
-"""DistroWatch Weekly"""
+'''DistroWatch Weekly'''
import datetime
@@ -10,28 +10,28 @@ from calibre.web.feeds.news import BasicNewsRecipe
class DistroWatchWeekly(BasicNewsRecipe):
- title = "DistroWatch Weekly"
- description = "Weekly news about Linux distributions"
- category = "Linux, Technology, News"
+ title = 'DistroWatch Weekly'
+ description = 'Weekly news about Linux distributions'
+ category = 'Linux, Technology, News'
oldest_article = 14
- language = "en"
+ language = 'en'
max_articles_per_feed = 50
no_stylesheets = True
use_embedded_content = False
- timefmt = " [%A, %d %B, %Y]"
+ timefmt = ' [%A, %d %B, %Y]'
auto_cleanup = False
keep_only_tags = [
dict(
attrs={
- "class":
- lambda x: x and ("News1" in x)
+ 'class':
+ lambda x: x and ('News1' in x)
}
)
]
def _get_mag_date(self):
- """Return date of latest weekly issue."""
+ '''Return date of latest weekly issue.'''
d = datetime.date(2022, 6, 20)
t = datetime.date.today()
@@ -45,17 +45,17 @@ class DistroWatchWeekly(BasicNewsRecipe):
# Get URL of latest mag page
ld = self._get_mag_date()
- url = ld.strftime("https://distrowatch.com/weekly.php?issue=%Y%m%d")
+ url = ld.strftime('https://distrowatch.com/weekly.php?issue=%Y%m%d')
url = url.lower()
- title = ld.strftime("DistroWatch Weekly for %Y-%m-%d")
+ title = ld.strftime('DistroWatch Weekly for %Y-%m-%d')
# Get articles
stories = [{
- "url": url,
- "title": title,
+ 'url': url,
+ 'title': title,
},]
index = [
- ("Articles", stories),
+ ('Articles', stories),
]
return index
diff --git a/recipes/dnevnik_cro.recipe b/recipes/dnevnik_cro.recipe
index b19204264f..d2e3303763 100644
--- a/recipes/dnevnik_cro.recipe
+++ b/recipes/dnevnik_cro.recipe
@@ -23,7 +23,7 @@ def new_tag(soup, name, attrs=()):
class DnevnikCro(BasicNewsRecipe):
title = 'Dnevnik - Hr'
__author__ = 'Darko Miletic'
- description = "Vijesti iz Hrvatske"
+ description = 'Vijesti iz Hrvatske'
publisher = 'Dnevnik.hr'
category = 'news, politics, Croatia'
oldest_article = 2
@@ -67,9 +67,9 @@ class DnevnikCro(BasicNewsRecipe):
del item[attrib]
mlang = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Language"), ("content", self.lang)])
+ ('http-equiv', 'Content-Language'), ('content', self.lang)])
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=UTF-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=UTF-8')])
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
return self.adeify_images(soup)
diff --git a/recipes/donga.recipe b/recipes/donga.recipe
index 0c55c88300..d6ac7e5537 100644
--- a/recipes/donga.recipe
+++ b/recipes/donga.recipe
@@ -4,15 +4,15 @@ from calibre.web.feeds.recipes import BasicNewsRecipe
# Comment out sections you are not interested in
sections = [
- ("정치", "politics"),
- ("사회", "national"),
- ("경제", "economy"),
- ("국제", "international"),
- ("사설칼럼", "editorials"),
- ("의학과학", "science"),
- ("문화연예", "culture"),
- ("스포츠", "sports"),
- ("사람속으로", "inmul")
+ ('정치', 'politics'),
+ ('사회', 'national'),
+ ('경제', 'economy'),
+ ('국제', 'international'),
+ ('사설칼럼', 'editorials'),
+ ('의학과학', 'science'),
+ ('문화연예', 'culture'),
+ ('스포츠', 'sports'),
+ ('사람속으로', 'inmul')
# Following sections are marked as optional
# by default. Uncomment to enable.
# , (u'건강', 'health')
@@ -26,24 +26,24 @@ sections = [
class Donga(BasicNewsRecipe):
- language = "ko"
- title = "동아일보"
- description = "동아일보 기사"
- __author__ = "Minsik Cho"
- ignore_duplicate_articles = {"title", "url"}
+ language = 'ko'
+ title = '동아일보'
+ description = '동아일보 기사'
+ __author__ = 'Minsik Cho'
+ ignore_duplicate_articles = {'title', 'url'}
compress_news_images = True
no_stylesheets = True
oldest_article = 2
- encoding = "utf-8"
+ encoding = 'utf-8'
# RSS Feed in syntax:
# https://rss.donga.com/[sections].xml
- feeds = [(title, "https://rss.donga.com/" + section + ".xml") for (title, section) in sections]
+ feeds = [(title, 'https://rss.donga.com/' + section + '.xml') for (title, section) in sections]
# Remove logo and print buttons
remove_tags = [
- dict(name="div", attrs={"class": "popHeaderWrap"}),
- dict(name="div", attrs={"class": "etc"}),
+ dict(name='div', attrs={'class': 'popHeaderWrap'}),
+ dict(name='div', attrs={'class': 'etc'}),
]
def print_version(self, url):
@@ -51,8 +51,8 @@ class Donga(BasicNewsRecipe):
# https://www.donga.com/news/[sections]/article/all/[date]/[gid]/1
# Return print version url with syntax:
# https://www.donga.com/news/View?gid=[gid]&date=[date]
- reobject = re.search("(?<=/all/)([0-9]*)/([0-9]*)", url)
+ reobject = re.search('(?<=/all/)([0-9]*)/([0-9]*)', url)
date = reobject.group(1)
gid = reobject.group(2)
- return "https://www.donga.com/news/View?gid=" + gid + "&date=" + date
+ return 'https://www.donga.com/news/View?gid=' + gid + '&date=' + date
diff --git a/recipes/dr_dk.recipe b/recipes/dr_dk.recipe
index 4ab0a9726e..85510a45cd 100644
--- a/recipes/dr_dk.recipe
+++ b/recipes/dr_dk.recipe
@@ -107,11 +107,11 @@ class DRNyheder(BasicNewsRecipe):
keep_only_tags = [
- dict(name="h1", attrs={'class': 'dre-article-title__heading'}), # Title
- dict(name="div", attrs={'class': 'dre-article-byline'}), # Author
- dict(name="figure", attrs={'class': 'dre-standard-article__figure'}), # Comment out to remove images
- dict(name="p", attrs={'class': 'dre-article-body-paragraph'}), # All body text of the article
- dict(name="article", attrs={'itemtype': 'http://schema.org/NewsArticle'}),
+ dict(name='h1', attrs={'class': 'dre-article-title__heading'}), # Title
+ dict(name='div', attrs={'class': 'dre-article-byline'}), # Author
+ dict(name='figure', attrs={'class': 'dre-standard-article__figure'}), # Comment out to remove images
+ dict(name='p', attrs={'class': 'dre-article-body-paragraph'}), # All body text of the article
+ dict(name='article', attrs={'itemtype': 'http://schema.org/NewsArticle'}),
#dict(name="h1", attrs={'class': 'hydra-latest-news-page-short-news__title'}),
#dict(name="p", attrs={'class': 'hydra-latest-news-page-short-news__paragraph'}),
#dict(name="div", attrs={'class': 'dre-speech'}),
@@ -123,7 +123,7 @@ class DRNyheder(BasicNewsRecipe):
dict(name='div', attrs={'class': [
'hydra-latest-news-page-short-news__share', 'hydra-latest-news-page-short-news__a11y-container',
'hydra-latest-news-page-short-news__meta', 'hydra-latest-news-page-short-news__image-slider', 'dre-byline__dates']}),
- dict(name="source"),
+ dict(name='source'),
#dict(name='menu', attrs={'class': 'share'}),
#dict(name='menu', attrs={'class': 'dr-site-share-horizontal'}),
]
diff --git a/recipes/dzieje_pl.recipe b/recipes/dzieje_pl.recipe
index 8496420553..82f8baff32 100644
--- a/recipes/dzieje_pl.recipe
+++ b/recipes/dzieje_pl.recipe
@@ -63,20 +63,20 @@ class Dzieje(BasicNewsRecipe):
def parse_index(self):
feeds = []
- feeds.append((u"Wiadomości", self.find_articles(
+ feeds.append((u'Wiadomości', self.find_articles(
'http://dzieje.pl/wiadomosci')))
- feeds.append((u"Kultura i sztuka", self.find_articles(
+ feeds.append((u'Kultura i sztuka', self.find_articles(
'http://dzieje.pl/kulturaisztuka')))
- feeds.append((u"Film", self.find_articles('http://dzieje.pl/kino')))
- feeds.append((u"Rozmaitości historyczne",
+ feeds.append((u'Film', self.find_articles('http://dzieje.pl/kino')))
+ feeds.append((u'Rozmaitości historyczne',
self.find_articles('http://dzieje.pl/rozmaitości')))
feeds.append(
- (u"Książka", self.find_articles('http://dzieje.pl/ksiazka')))
+ (u'Książka', self.find_articles('http://dzieje.pl/ksiazka')))
feeds.append(
- (u"Wystawa", self.find_articles('http://dzieje.pl/wystawa')))
- feeds.append((u"Edukacja", self.find_articles(
+ (u'Wystawa', self.find_articles('http://dzieje.pl/wystawa')))
+ feeds.append((u'Edukacja', self.find_articles(
'http://dzieje.pl/edukacja')))
- feeds.append((u"Dzieje się", self.find_articles(
+ feeds.append((u'Dzieje się', self.find_articles(
'http://dzieje.pl/wydarzenia')))
return feeds
diff --git a/recipes/dziennik_pl.recipe b/recipes/dziennik_pl.recipe
index d189ede4d3..e0d88ca28e 100644
--- a/recipes/dziennik_pl.recipe
+++ b/recipes/dziennik_pl.recipe
@@ -21,7 +21,7 @@ class Dziennik_pl(BasicNewsRecipe):
remove_empty_feeds = True
ignore_duplicate_articles = {'title', 'url'}
extra_css = 'ul {list-style: none; padding: 0; margin: 0;} .foto {float: left;} .clr {clear: both;}'
- preprocess_regexps = [(re.compile("Komentarze:"), lambda m: ''), (re.compile(
+ preprocess_regexps = [(re.compile('Komentarze:'), lambda m: ''), (re.compile(
'>>> CZYTAJ TAKŻE: ".*?"
'), lambda m: '')]
keep_only_tags = [dict(id='article')]
remove_tags = [dict(name='div', attrs={'class': ['art_box_dodatki', 'new_facebook_icons2', 'leftArt', 'article_print', 'quiz-widget', 'belka-spol', 'belka-spol belka-spol-bottom', 'art_data_tags', 'cl_right', 'boxRounded gal_inside']}), dict(name='a', attrs={'class': ['komentarz', 'article_icon_addcommnent']}), dict(name='ins'), dict(name='br')] # noqa: E501
diff --git a/recipes/dziennik_polski.recipe b/recipes/dziennik_polski.recipe
index 94b76229c5..08aa849e6d 100644
--- a/recipes/dziennik_polski.recipe
+++ b/recipes/dziennik_polski.recipe
@@ -120,7 +120,7 @@ class DziennikPolski24(BasicNewsRecipe):
if self.username is not None and self.password is not None:
br.open('http://www.dziennikpolski24.pl/pl/moje-konto/950606-loguj.html')
br.select_form(nr=1)
- br["user_login[login]"] = self.username
+ br['user_login[login]'] = self.username
br['user_login[pass]'] = self.password
br.submit()
return br
diff --git a/recipes/economist.recipe b/recipes/economist.recipe
index 0265dee51e..cb6aa782f4 100644
--- a/recipes/economist.recipe
+++ b/recipes/economist.recipe
@@ -63,7 +63,7 @@ def load_article_from_json(raw, root):
body = root.xpath('//body')[0]
article = E(body, 'article')
E(article, 'div', data['flyTitle'], style='color: red; font-size:small; font-weight:bold;')
- E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
+ E(article, 'h1', data['title'], title=safe_dict(data, 'url', 'canonical') or '')
E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
try:
date = data['dateModified']
@@ -97,8 +97,8 @@ def process_web_node(node):
return f'<p>{node.get("textHtml")}</p>'
return f'<p>{node.get("text", "")}</p>'
elif ntype == 'IMAGE':
- alt = "" if node.get("altText") is None else node.get("altText")
- cap = ""
+ alt = '' if node.get('altText') is None else node.get('altText')
+ cap = ''
if node.get('caption'):
if node['caption'].get('textHtml') is not None:
cap = node['caption']['textHtml']
@@ -123,7 +123,7 @@ def load_article_from_web_json(raw):
data = json.loads(raw)['props']['pageProps']['cp2Content']
body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
body += f'<h1>{data["headline"]}</h1>'
- if data.get("rubric") and data.get("rubric") is not None:
+ if data.get('rubric') and data.get('rubric') is not None:
body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
try:
date = data['dateModified']
@@ -186,7 +186,7 @@ class Economist(BasicNewsRecipe):
encoding = 'utf-8'
masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png'
- __author__ = "Kovid Goyal"
+ __author__ = 'Kovid Goyal'
description = (
'Global news and current affairs from a European'
' perspective. Best downloaded on Friday mornings (GMT)'
@@ -199,7 +199,7 @@ class Economist(BasicNewsRecipe):
resolve_internal_links = True
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent', 'aside', 'footer', 'svg']),
- dict(attrs={'aria-label': "Article Teaser"}),
+ dict(attrs={'aria-label': 'Article Teaser'}),
dict(attrs={'id': 'player'}),
dict(attrs={
'class': [
@@ -266,11 +266,11 @@ class Economist(BasicNewsRecipe):
if edition_date and isinstance(edition_date, str):
return parse_only_date(edition_date, as_utc=False)
try:
- url = self.browser.open("https://www.economist.com/printedition").geturl()
+ url = self.browser.open('https://www.economist.com/printedition').geturl()
except Exception as e:
self.log('Failed to fetch publication date with error: ' + str(e))
return super().publication_date()
- return parse_only_date(url.split("/")[-1], as_utc=False)
+ return parse_only_date(url.split('/')[-1], as_utc=False)
def economist_test_article(self):
return [('Articles', [{'title':'test',
@@ -364,23 +364,23 @@ class Economist(BasicNewsRecipe):
self.log('Got cover:', self.cover_url, '\n', self.description)
feeds_dict = defaultdict(list)
- for part in safe_dict(data, "hasPart", "parts"):
+ for part in safe_dict(data, 'hasPart', 'parts'):
try:
section = part['articleSection']['internal'][0]['title']
except Exception:
section = safe_dict(part, 'print', 'section', 'title') or 'section'
if section not in feeds_dict:
self.log(section)
- title = safe_dict(part, "title")
- desc = safe_dict(part, "rubric") or ''
- sub = safe_dict(part, "flyTitle") or ''
+ title = safe_dict(part, 'title')
+ desc = safe_dict(part, 'rubric') or ''
+ sub = safe_dict(part, 'flyTitle') or ''
if sub and section != sub:
desc = sub + ' :: ' + desc
pt = PersistentTemporaryFile('.html')
pt.write(json.dumps(part).encode('utf-8'))
pt.close()
url = 'file:///' + pt.name
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
self.log('\t', title, '\n\t\t', desc)
return [(section, articles) for section, articles in feeds_dict.items()]
@@ -513,22 +513,22 @@ class Economist(BasicNewsRecipe):
return self.economist_return_index(ans)
def economist_parse_web_index(self, soup):
- script_tag = soup.find("script", id="__NEXT_DATA__")
+ script_tag = soup.find('script', id='__NEXT_DATA__')
if script_tag is not None:
data = json.loads(script_tag.string)
# open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
- self.description = safe_dict(data, "props", "pageProps", "content", "headline")
- self.timefmt = ' [' + safe_dict(data, "props", "pageProps", "content", "formattedIssueDate") + ']'
- self.cover_url = safe_dict(data, "props", "pageProps", "content", "cover", "url").replace(
+ self.description = safe_dict(data, 'props', 'pageProps', 'content', 'headline')
+ self.timefmt = ' [' + safe_dict(data, 'props', 'pageProps', 'content', 'formattedIssueDate') + ']'
+ self.cover_url = safe_dict(data, 'props', 'pageProps', 'content', 'cover', 'url').replace(
'economist.com/', 'economist.com/cdn-cgi/image/width=960,quality=80,format=auto/').replace('SQ_', '')
self.log('Got cover:', self.cover_url)
feeds = []
for part in safe_dict(
- data, "props", "pageProps", "content", "headerSections"
- ) + safe_dict(data, "props", "pageProps", "content", "sections"):
- section = safe_dict(part, "name") or ''
+ data, 'props', 'pageProps', 'content', 'headerSections'
+ ) + safe_dict(data, 'props', 'pageProps', 'content', 'sections'):
+ section = safe_dict(part, 'name') or ''
if not section:
continue
self.log(section)
@@ -536,12 +536,12 @@ class Economist(BasicNewsRecipe):
articles = []
for ar in part['articles']:
- title = safe_dict(ar, "headline") or ''
- url = process_url(safe_dict(ar, "url") or '')
+ title = safe_dict(ar, 'headline') or ''
+ url = process_url(safe_dict(ar, 'url') or '')
if not title or not url:
continue
- desc = safe_dict(ar, "rubric") or ''
- sub = safe_dict(ar, "flyTitle") or ''
+ desc = safe_dict(ar, 'rubric') or ''
+ sub = safe_dict(ar, 'flyTitle') or ''
if sub and section != sub:
desc = sub + ' :: ' + desc
self.log('\t', title, '\n\t', desc, '\n\t\t', url)
diff --git a/recipes/economist_espresso.recipe b/recipes/economist_espresso.recipe
index 94e029f947..ab60ae5c2c 100644
--- a/recipes/economist_espresso.recipe
+++ b/recipes/economist_espresso.recipe
@@ -58,7 +58,7 @@ def load_article_from_json(raw, root):
body = root.xpath('//body')[0]
article = E(body, 'article')
E(article, 'div', data['flyTitle'] , style='color: red; font-size:small; font-weight:bold;')
- E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
+ E(article, 'h1', data['title'], title=safe_dict(data, 'url', 'canonical') or '')
E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
E(article, 'div', data['byline'], style='font-style: italic; color:#202020;')
main_image_url = safe_dict(data, 'image', 'main', 'url').get('canonical')
@@ -130,7 +130,7 @@ class Espresso(BasicNewsRecipe):
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent', 'aside', 'footer']),
- dict(attrs={'aria-label': "Article Teaser"}),
+ dict(attrs={'aria-label': 'Article Teaser'}),
dict(attrs={
'class': [
'dblClkTrk', 'ec-article-info', 'share_inline_header',
@@ -189,13 +189,13 @@ class Espresso(BasicNewsRecipe):
self.description = data['rubric']
ans = []
- for part in safe_dict(data, "hasPart", "parts"):
- title = safe_dict(part, "title")
+ for part in safe_dict(data, 'hasPart', 'parts'):
+ title = safe_dict(part, 'title')
pt = PersistentTemporaryFile('.html')
pt.write(json.dumps(part).encode('utf-8'))
pt.close()
url = 'file:///' + pt.name
- ans.append({"title": title, "url": url})
+ ans.append({'title': title, 'url': url})
return [('Espresso', ans)]
def preprocess_html(self, soup):
diff --git a/recipes/economist_free.recipe b/recipes/economist_free.recipe
index 0265dee51e..cb6aa782f4 100644
--- a/recipes/economist_free.recipe
+++ b/recipes/economist_free.recipe
@@ -63,7 +63,7 @@ def load_article_from_json(raw, root):
body = root.xpath('//body')[0]
article = E(body, 'article')
E(article, 'div', data['flyTitle'], style='color: red; font-size:small; font-weight:bold;')
- E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
+ E(article, 'h1', data['title'], title=safe_dict(data, 'url', 'canonical') or '')
E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
try:
date = data['dateModified']
@@ -97,8 +97,8 @@ def process_web_node(node):
return f'<p>{node.get("textHtml")}</p>'
return f'<p>{node.get("text", "")}</p>'
elif ntype == 'IMAGE':
- alt = "" if node.get("altText") is None else node.get("altText")
- cap = ""
+ alt = '' if node.get('altText') is None else node.get('altText')
+ cap = ''
if node.get('caption'):
if node['caption'].get('textHtml') is not None:
cap = node['caption']['textHtml']
@@ -123,7 +123,7 @@ def load_article_from_web_json(raw):
data = json.loads(raw)['props']['pageProps']['cp2Content']
body += f'<div style="color: red; font-size:small; font-weight:bold;">{data.get("flyTitle", "")}</div>'
body += f'<h1>{data["headline"]}</h1>'
- if data.get("rubric") and data.get("rubric") is not None:
+ if data.get('rubric') and data.get('rubric') is not None:
body += f'<div style="font-style: italic; color:#202020;">{data.get("rubric", "")}</div>'
try:
date = data['dateModified']
@@ -186,7 +186,7 @@ class Economist(BasicNewsRecipe):
encoding = 'utf-8'
masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png'
- __author__ = "Kovid Goyal"
+ __author__ = 'Kovid Goyal'
description = (
'Global news and current affairs from a European'
' perspective. Best downloaded on Friday mornings (GMT)'
@@ -199,7 +199,7 @@ class Economist(BasicNewsRecipe):
resolve_internal_links = True
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent', 'aside', 'footer', 'svg']),
- dict(attrs={'aria-label': "Article Teaser"}),
+ dict(attrs={'aria-label': 'Article Teaser'}),
dict(attrs={'id': 'player'}),
dict(attrs={
'class': [
@@ -266,11 +266,11 @@ class Economist(BasicNewsRecipe):
if edition_date and isinstance(edition_date, str):
return parse_only_date(edition_date, as_utc=False)
try:
- url = self.browser.open("https://www.economist.com/printedition").geturl()
+ url = self.browser.open('https://www.economist.com/printedition').geturl()
except Exception as e:
self.log('Failed to fetch publication date with error: ' + str(e))
return super().publication_date()
- return parse_only_date(url.split("/")[-1], as_utc=False)
+ return parse_only_date(url.split('/')[-1], as_utc=False)
def economist_test_article(self):
return [('Articles', [{'title':'test',
@@ -364,23 +364,23 @@ class Economist(BasicNewsRecipe):
self.log('Got cover:', self.cover_url, '\n', self.description)
feeds_dict = defaultdict(list)
- for part in safe_dict(data, "hasPart", "parts"):
+ for part in safe_dict(data, 'hasPart', 'parts'):
try:
section = part['articleSection']['internal'][0]['title']
except Exception:
section = safe_dict(part, 'print', 'section', 'title') or 'section'
if section not in feeds_dict:
self.log(section)
- title = safe_dict(part, "title")
- desc = safe_dict(part, "rubric") or ''
- sub = safe_dict(part, "flyTitle") or ''
+ title = safe_dict(part, 'title')
+ desc = safe_dict(part, 'rubric') or ''
+ sub = safe_dict(part, 'flyTitle') or ''
if sub and section != sub:
desc = sub + ' :: ' + desc
pt = PersistentTemporaryFile('.html')
pt.write(json.dumps(part).encode('utf-8'))
pt.close()
url = 'file:///' + pt.name
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
self.log('\t', title, '\n\t\t', desc)
return [(section, articles) for section, articles in feeds_dict.items()]
@@ -513,22 +513,22 @@ class Economist(BasicNewsRecipe):
return self.economist_return_index(ans)
def economist_parse_web_index(self, soup):
- script_tag = soup.find("script", id="__NEXT_DATA__")
+ script_tag = soup.find('script', id='__NEXT_DATA__')
if script_tag is not None:
data = json.loads(script_tag.string)
# open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
- self.description = safe_dict(data, "props", "pageProps", "content", "headline")
- self.timefmt = ' [' + safe_dict(data, "props", "pageProps", "content", "formattedIssueDate") + ']'
- self.cover_url = safe_dict(data, "props", "pageProps", "content", "cover", "url").replace(
+ self.description = safe_dict(data, 'props', 'pageProps', 'content', 'headline')
+ self.timefmt = ' [' + safe_dict(data, 'props', 'pageProps', 'content', 'formattedIssueDate') + ']'
+ self.cover_url = safe_dict(data, 'props', 'pageProps', 'content', 'cover', 'url').replace(
'economist.com/', 'economist.com/cdn-cgi/image/width=960,quality=80,format=auto/').replace('SQ_', '')
self.log('Got cover:', self.cover_url)
feeds = []
for part in safe_dict(
- data, "props", "pageProps", "content", "headerSections"
- ) + safe_dict(data, "props", "pageProps", "content", "sections"):
- section = safe_dict(part, "name") or ''
+ data, 'props', 'pageProps', 'content', 'headerSections'
+ ) + safe_dict(data, 'props', 'pageProps', 'content', 'sections'):
+ section = safe_dict(part, 'name') or ''
if not section:
continue
self.log(section)
@@ -536,12 +536,12 @@ class Economist(BasicNewsRecipe):
articles = []
for ar in part['articles']:
- title = safe_dict(ar, "headline") or ''
- url = process_url(safe_dict(ar, "url") or '')
+ title = safe_dict(ar, 'headline') or ''
+ url = process_url(safe_dict(ar, 'url') or '')
if not title or not url:
continue
- desc = safe_dict(ar, "rubric") or ''
- sub = safe_dict(ar, "flyTitle") or ''
+ desc = safe_dict(ar, 'rubric') or ''
+ sub = safe_dict(ar, 'flyTitle') or ''
if sub and section != sub:
desc = sub + ' :: ' + desc
self.log('\t', title, '\n\t', desc, '\n\t\t', url)
diff --git a/recipes/economist_news.recipe b/recipes/economist_news.recipe
index e35f06dc31..a95445403f 100644
--- a/recipes/economist_news.recipe
+++ b/recipes/economist_news.recipe
@@ -59,7 +59,7 @@ def load_article_from_json(raw, root):
body = root.xpath('//body')[0]
article = E(body, 'article')
E(article, 'div', data['flyTitle'], style='color: red; font-size:small; font-weight:bold;')
- E(article, 'h1', data['title'], title=safe_dict(data, "url", "canonical") or '')
+ E(article, 'h1', data['title'], title=safe_dict(data, 'url', 'canonical') or '')
E(article, 'div', data['rubric'], style='font-style: italic; color:#202020;')
try:
date = data['dateModified']
@@ -125,7 +125,7 @@ class EconomistNews(BasicNewsRecipe):
encoding = 'utf-8'
masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png'
- __author__ = "Kovid Goyal"
+ __author__ = 'Kovid Goyal'
description = (
'Global news and current affairs from a European'
' perspective. Get the latest articles here.'
@@ -140,7 +140,7 @@ class EconomistNews(BasicNewsRecipe):
resolve_internal_links = True
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent', 'aside', 'footer', 'svg']),
- dict(attrs={'aria-label': "Article Teaser"}),
+ dict(attrs={'aria-label': 'Article Teaser'}),
dict(attrs={'id': 'player'}),
dict(attrs={
'class': [
@@ -234,9 +234,9 @@ class EconomistNews(BasicNewsRecipe):
articles = []
for art in part['hasPart']['parts']:
- title = safe_dict(art, "title")
- desc = safe_dict(art, "rubric") or ''
- sub = safe_dict(art, "flyTitle") or ''
+ title = safe_dict(art, 'title')
+ desc = safe_dict(art, 'rubric') or ''
+ sub = safe_dict(art, 'flyTitle') or ''
if sub and section != sub:
desc = sub + ' :: ' + desc
if not art.get('text'):
@@ -249,7 +249,7 @@ class EconomistNews(BasicNewsRecipe):
pt.write(json.dumps(art).encode('utf-8'))
pt.close()
url = 'file:///' + pt.name
- articles.append({"title": title, "url": url, "description": desc})
+ articles.append({'title': title, 'url': url, 'description': desc})
self.log('\t', title, '\n\t\t', desc)
if articles:
feeds.append((section, articles))
diff --git a/recipes/economist_search.recipe b/recipes/economist_search.recipe
index 632c8299d8..ae29fc5eab 100644
--- a/recipes/economist_search.recipe
+++ b/recipes/economist_search.recipe
@@ -23,8 +23,8 @@ def process_node(node):
return f'<p>{node.get("textHtml")}</p>'
return f'<p>{node.get("text", "")}</p>'
elif ntype == 'IMAGE':
- alt = "" if node.get("altText") is None else node.get("altText")
- cap = ""
+ alt = '' if node.get('altText') is None else node.get('altText')
+ cap = ''
if node.get('caption'):
if node['caption'].get('textHtml') is not None:
cap = node['caption']['textHtml']
@@ -112,7 +112,7 @@ class econ_search(BasicNewsRecipe):
title = 'The Economist - Search'
language = 'en'
encoding = 'utf-8'
- __author__ = "unkn0wn"
+ __author__ = 'unkn0wn'
description = (
'Use the Advanced section of the recipe to search.'
)
@@ -128,7 +128,7 @@ class econ_search(BasicNewsRecipe):
resolve_internal_links = True
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent', 'aside', 'footer', 'svg']),
- dict(attrs={'aria-label': "Article Teaser"}),
+ dict(attrs={'aria-label': 'Article Teaser'}),
dict(attrs={'id':'player'}),
dict(attrs={
'class': [
diff --git a/recipes/economist_world_ahead.recipe b/recipes/economist_world_ahead.recipe
index ed5a798d80..8b2bd6d3fe 100644
--- a/recipes/economist_world_ahead.recipe
+++ b/recipes/economist_world_ahead.recipe
@@ -23,8 +23,8 @@ def process_node(node):
return f'<p>{node.get("textHtml")}</p>'
return f'<p>{node.get("text", "")}</p>'
elif ntype == 'IMAGE':
- alt = "" if node.get("altText") is None else node.get("altText")
- cap = ""
+ alt = '' if node.get('altText') is None else node.get('altText')
+ cap = ''
if node.get('caption'):
if node['caption'].get('textHtml') is not None:
cap = node['caption']['textHtml']
@@ -122,7 +122,7 @@ class EconomistWorld(BasicNewsRecipe):
encoding = 'utf-8'
masthead_url = 'https://www.livemint.com/lm-img/dev/economist-logo-oneline.png'
- __author__ = "unkn0wn"
+ __author__ = 'unkn0wn'
description = (
'The World Ahead is The Economist’s future-gazing publication. It prepares audiences for what is to '
'come with mind-stretching insights and expert analysis—all in The Economist’s clear, elegant style.'
@@ -136,7 +136,7 @@ class EconomistWorld(BasicNewsRecipe):
resolve_internal_links = True
remove_tags = [
dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent', 'aside', 'footer', 'svg']),
- dict(attrs={'aria-label': "Article Teaser"}),
+ dict(attrs={'aria-label': 'Article Teaser'}),
dict(attrs={'id': 'player'}),
dict(attrs={
'class': [
@@ -205,24 +205,24 @@ class EconomistWorld(BasicNewsRecipe):
return self.economist_return_index(ans)
def economist_parse_index(self, soup):
- script_tag = soup.find("script", id="__NEXT_DATA__")
+ script_tag = soup.find('script', id='__NEXT_DATA__')
if script_tag is not None:
data = json.loads(script_tag.string)
# open('/t/raw.json', 'w').write(json.dumps(data, indent=2, sort_keys=True))
- self.title = safe_dict(data, "props", "pageProps", "content", "headline")
+ self.title = safe_dict(data, 'props', 'pageProps', 'content', 'headline')
self.cover_url = 'https://mma.prnewswire.com/media/2561745/The_Economist_World_Ahead_2025_cover.jpg?w=600'
feeds = []
- for coll in safe_dict(data, "props", "pageProps", "content", "components"):
- section = safe_dict(coll, "headline") or ''
+ for coll in safe_dict(data, 'props', 'pageProps', 'content', 'components'):
+ section = safe_dict(coll, 'headline') or ''
self.log(section)
articles = []
- for part in safe_dict(coll, "items"):
- title = safe_dict(part, "headline") or ''
- url = process_url(safe_dict(part, "url") or '')
- desc = safe_dict(part, "rubric") or ''
- sub = safe_dict(part, "flyTitle") or ''
+ for part in safe_dict(coll, 'items'):
+ title = safe_dict(part, 'headline') or ''
+ url = process_url(safe_dict(part, 'url') or '')
+ desc = safe_dict(part, 'rubric') or ''
+ sub = safe_dict(part, 'flyTitle') or ''
if sub and section != sub:
desc = sub + ' :: ' + desc
self.log('\t', title, '\n\t', desc, '\n\t\t', url)
diff --git a/recipes/edmonton_journal.recipe b/recipes/edmonton_journal.recipe
index 5395ba7bda..2c43582ad0 100644
--- a/recipes/edmonton_journal.recipe
+++ b/recipes/edmonton_journal.recipe
@@ -164,24 +164,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -262,10 +262,10 @@ class CanWestPaper(BasicNewsRecipe):
if url.startswith('/'):
url = self.url_prefix + url
if not url.startswith(self.url_prefix):
- print("Rejected " + url)
+ print('Rejected ' + url)
return
if url in self.url_list:
- print("Rejected dup " + url)
+ print('Rejected dup ' + url)
return
self.url_list.append(url)
title = self.tag_to_string(atag, False)
@@ -277,8 +277,8 @@ class CanWestPaper(BasicNewsRecipe):
return
dtag = adiv.find('div', 'content')
description = ''
- print("URL " + url)
- print("TITLE " + title)
+ print('URL ' + url)
+ print('TITLE ' + title)
if dtag is not None:
stag = dtag.span
if stag is not None:
@@ -286,18 +286,18 @@ class CanWestPaper(BasicNewsRecipe):
description = self.tag_to_string(stag, False)
else:
description = self.tag_to_string(dtag, False)
- print("DESCRIPTION: " + description)
+ print('DESCRIPTION: ' + description)
if key not in articles:
articles[key] = []
articles[key].append(dict(
title=title, url=url, date='', description=description, author='', content=''))
def parse_web_index(key, keyurl):
- print("Section: " + key + ': ' + self.url_prefix + keyurl)
+ print('Section: ' + key + ': ' + self.url_prefix + keyurl)
try:
soup = self.index_to_soup(self.url_prefix + keyurl)
except:
- print("Section: " + key + ' NOT FOUND')
+ print('Section: ' + key + ' NOT FOUND')
return
ans.append(key)
mainsoup = soup.find('div', 'bodywrapper')
diff --git a/recipes/el_colombiano.recipe b/recipes/el_colombiano.recipe
index f557d85bbd..81c888bff9 100644
--- a/recipes/el_colombiano.recipe
+++ b/recipes/el_colombiano.recipe
@@ -20,12 +20,12 @@ class AdvancedUserRecipe1311790237(BasicNewsRecipe):
masthead_url = 'http://www.elcolombiano.com/images/logoElColombiano348x46.gif'
publication_type = 'newspaper'
- extra_css = """
+ extra_css = '''
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h1{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
h3{font-family: sans-serif; font-size:100%; font-style: italic; text-align: justify; }
- """
+ '''
feeds = [(u'Portada', u'http://www.elcolombiano.com/rss/portada.xml'),
(u'Antioquia', u'http://www.elcolombiano.com/rss/Antioquia.xml'),
diff --git a/recipes/el_cultural.recipe b/recipes/el_cultural.recipe
index fe767c507d..737aace9fc 100644
--- a/recipes/el_cultural.recipe
+++ b/recipes/el_cultural.recipe
@@ -55,9 +55,9 @@ class RevistaElCultural(BasicNewsRecipe):
if url.startswith('/version_papel/' + titleSection + '/'):
url = 'http://www.elcultural.es' + url
- self.log('\t\tFound article:', title[0:title.find("|") - 1])
+ self.log('\t\tFound article:', title[0:title.find('|') - 1])
self.log('\t\t\t', url)
- current_articles.append({'title': title[0:title.find("|") - 1], 'url': url,
+ current_articles.append({'title': title[0:title.find('|') - 1], 'url': url,
'description': '', 'date': ''})
return current_articles
diff --git a/recipes/el_diplo.recipe b/recipes/el_diplo.recipe
index c9c44e26f0..2374a639cb 100644
--- a/recipes/el_diplo.recipe
+++ b/recipes/el_diplo.recipe
@@ -1,51 +1,51 @@
# -*- mode: python; coding: utf-8; -*-
# vim: set syntax=python fileencoding=utf-8
-__license__ = "GPL v3"
-__copyright__ = "2023, Tomás Di Domenico "
+__license__ = 'GPL v3'
+__copyright__ = '2023, Tomás Di Domenico '
-"""
+'''
www.eldiplo.org
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe
class ElDiplo2023(BasicNewsRecipe):
- title = "Le Monde Diplomatique - cono sur"
- __author__ = "Tomás Di Domenico"
- description = "Publicación de Le Monde Diplomatique para el cono sur."
- publisher = "Capital Intelectual"
- category = "News, Politics, Argentina, Uruguay, Paraguay, South America, World"
+ title = 'Le Monde Diplomatique - cono sur'
+ __author__ = 'Tomás Di Domenico'
+ description = 'Publicación de Le Monde Diplomatique para el cono sur.'
+ publisher = 'Capital Intelectual'
+ category = 'News, Politics, Argentina, Uruguay, Paraguay, South America, World'
oldest_article = 31
no_stylesheets = True
- encoding = "utf8"
+ encoding = 'utf8'
use_embedded_content = False
- language = "es_AR"
+ language = 'es_AR'
remove_empty_feeds = True
- publication_type = "magazine"
+ publication_type = 'magazine'
delay = 1
simultaneous_downloads = 1
timeout = 8
needs_subscription = True
- ignore_duplicate_articles = {"url"}
+ ignore_duplicate_articles = {'url'}
temp_files = []
fetch_retries = 10
handle_gzip = True
compress_news_images = True
scale_news_images_to_device = True
masthead_url = (
- "https://www.eldiplo.org/wp-content/themes/_polenta_/assets/diplo.png"
+ 'https://www.eldiplo.org/wp-content/themes/_polenta_/assets/diplo.png'
)
- INDEX = "https://www.eldiplo.org/"
+ INDEX = 'https://www.eldiplo.org/'
- conversion_options = {"series": "El Dipló", "publisher": publisher, "base_font_size": 8, "tags": category}
+ conversion_options = {'series': 'El Dipló', 'publisher': publisher, 'base_font_size': 8, 'tags': category}
- keep_only_tags = [dict(name=["article"])]
+ keep_only_tags = [dict(name=['article'])]
- remove_tags = [dict(name=["button"])]
+ remove_tags = [dict(name=['button'])]
- extra_css = """
+ extra_css = '''
.entry-title {
text-align: center;
}
@@ -67,59 +67,59 @@ class ElDiplo2023(BasicNewsRecipe):
padding-left: 10%;
padding-right: 10%;
}
- """
+ '''
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
br.open(self.INDEX)
if self.username is not None and self.password is not None:
- br.select_form(id="loginform")
- br["log"] = self.username
- br["pwd"] = self.password
+ br.select_form(id='loginform')
+ br['log'] = self.username
+ br['pwd'] = self.password
br.submit()
return br
def get_cover_url(self):
soup_index = self.index_to_soup(self.INDEX)
- tag_sumario = soup_index.find("span", text="Sumario")
- url_sumario = "https://www.eldiplo.org" + tag_sumario.parent["href"]
+ tag_sumario = soup_index.find('span', text='Sumario')
+ url_sumario = 'https://www.eldiplo.org' + tag_sumario.parent['href']
soup = self.index_to_soup(url_sumario)
- container = soup.find("div", class_="px-16")
- url = container.find("img")["src"]
+ container = soup.find('div', class_='px-16')
+ url = container.find('img')['src']
- return getattr(self, "cover_url", url)
+ return getattr(self, 'cover_url', url)
def _process_article(self, article):
- url = article.find("a", href=True, attrs={"class": "title"})["href"]
- title = self.tag_to_string(article).replace("Editorial", "Editorial: ")
+ url = article.find('a', href=True, attrs={'class': 'title'})['href']
+ title = self.tag_to_string(article).replace('Editorial', 'Editorial: ')
try:
- title, authors = title.split(", por")
- authors = f"por {authors}"
+ title, authors = title.split(', por')
+ authors = f'por {authors}'
except ValueError:
- authors = ""
- self.log("title: ", title, " url: ", url)
- return {"title": title, "url": url, "description": authors, "date": ""}
+ authors = ''
+ self.log('title: ', title, ' url: ', url)
+ return {'title': title, 'url': url, 'description': authors, 'date': ''}
def preprocess_html(self, soup):
- font_size = "90%"
+ font_size = '90%'
# make the footnotes smaller
- for p in soup.find("div", id="nota_pie").findChildren("p", recursive=False):
- p["style"] = f"font-size: {font_size};"
+ for p in soup.find('div', id='nota_pie').findChildren('p', recursive=False):
+ p['style'] = f'font-size: {font_size};'
return soup
def parse_index(self):
soup_index = self.index_to_soup(self.INDEX)
- tag_sumario = soup_index.find("span", text="Sumario")
+ tag_sumario = soup_index.find('span', text='Sumario')
if tag_sumario is None:
return None
- url_sumario = "https://www.eldiplo.org" + tag_sumario.parent["href"]
+ url_sumario = 'https://www.eldiplo.org' + tag_sumario.parent['href']
self.log(url_sumario)
soup_sumario = self.index_to_soup(url_sumario)
@@ -128,20 +128,20 @@ class ElDiplo2023(BasicNewsRecipe):
articles = []
dossiers = []
- sumario = soup_sumario.find("div", class_="sumario")
+ sumario = soup_sumario.find('div', class_='sumario')
- for section in sumario.find_all("div", recursive=False):
- classes = section.attrs["class"]
+ for section in sumario.find_all('div', recursive=False):
+ classes = section.attrs['class']
- if "dossier" in classes:
- dtitle = self.tag_to_string(section.find("h3"))
+ if 'dossier' in classes:
+ dtitle = self.tag_to_string(section.find('h3'))
darticles = []
- for article in section.find_all("div", recursive=False):
+ for article in section.find_all('div', recursive=False):
darticles.append(self._process_article(article))
dossiers.append((dtitle, darticles))
else:
articles.append(self._process_article(section))
- feeds.append(("Artículos", articles))
+ feeds.append(('Artículos', articles))
feeds += dossiers
return feeds
diff --git a/recipes/el_pais.recipe b/recipes/el_pais.recipe
index ffe83b57dc..b2d1bd90a5 100644
--- a/recipes/el_pais.recipe
+++ b/recipes/el_pais.recipe
@@ -119,11 +119,11 @@ div.a_md_a {text-align: center; text-transform: uppercase; font-size: .8rem;}
try:
br.open(cover)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def image_url_processor(cls, baseurl, url):
- splitUrl = url.split("cloudfront-")
+ splitUrl = url.split('cloudfront-')
parsedUrl = 'https://cloudfront-' + splitUrl[1]
return parsedUrl
diff --git a/recipes/el_pais_babelia.recipe b/recipes/el_pais_babelia.recipe
index 88049a91b1..3fff2db54d 100644
--- a/recipes/el_pais_babelia.recipe
+++ b/recipes/el_pais_babelia.recipe
@@ -36,7 +36,7 @@ class ElPaisBabelia(BasicNewsRecipe):
title = self.tag_to_string(post)
if str(post).find('class=') > 0:
klass = post['class']
- if klass != "":
+ if klass != '':
self.log()
self.log('--> post: ', post)
self.log('--> url: ', url)
diff --git a/recipes/elcohetealaluna.recipe b/recipes/elcohetealaluna.recipe
index 1ff45144b5..4e33f113ef 100644
--- a/recipes/elcohetealaluna.recipe
+++ b/recipes/elcohetealaluna.recipe
@@ -28,12 +28,12 @@ class elcohetealaluna(BasicNewsRecipe):
compress_news_images = True
masthead_url = 'https://www.elcohetealaluna.com/wp-content/uploads/2018/06/logo-menu.png'
- extra_css = """
+ extra_css = '''
body{font-family: Georgia, Times, "Times New Roman", serif}
h1,h2,.post-author-name{font-family: Oswald, sans-serif}
h2{color: gray}
img{margin-top:1em; margin-bottom: 1em; display:block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/elcronista-arg.recipe b/recipes/elcronista-arg.recipe
index 9cc1f908ae..4ada20b514 100644
--- a/recipes/elcronista-arg.recipe
+++ b/recipes/elcronista-arg.recipe
@@ -28,10 +28,10 @@ class ElCronistaArg(BasicNewsRecipe):
auto_cleanup_keep = '//div[@class="header-bottom"] | //h1 | //h2'
ignore_duplicate_articles = {'url'}
masthead_url = 'https://www.cronista.com/export/sites/diarioelcronista/arte/v2/lg_cronista_footer.png_665574830.png'
- extra_css = """
+ extra_css = '''
body{font-family: 'Source Sans Pro', sans-serif}
h1,h2,h3,h4{font-family: 'Libre Baskerville', serif}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/elektroda_pl.recipe b/recipes/elektroda_pl.recipe
index 2a5550ae40..d5361c4407 100644
--- a/recipes/elektroda_pl.recipe
+++ b/recipes/elektroda_pl.recipe
@@ -29,5 +29,5 @@ class Elektroda(BasicNewsRecipe):
feeds = BasicNewsRecipe.parse_feeds(self)
for feed in feeds:
for article in feed.articles[:]:
- article.title = article.title[article.title.find("::") + 3:]
+ article.title = article.title[article.title.find('::') + 3:]
return feeds
diff --git a/recipes/elmundo.recipe b/recipes/elmundo.recipe
index 1432aa1715..c4ed15ce6a 100644
--- a/recipes/elmundo.recipe
+++ b/recipes/elmundo.recipe
@@ -35,14 +35,14 @@ class ElMundo(BasicNewsRecipe):
articles_are_obfuscated = True
auto_cleanup = True
temp_files = []
- extra_css = """
+ extra_css = '''
body{font-family: "PT serif",Georgia,serif,times}
.metadata_noticia{font-size: small}
.pestana_GDP{font-size: small; font-weight:bold}
h1 {color: #333333; font-family: "Clear Sans Bold",Arial,sans-serif,helvetica}
.hora{color: red}
.update{color: gray}
- """
+ '''
conversion_options = {
'comments': description, 'tags': category, 'language': language, 'publisher': publisher
@@ -83,14 +83,14 @@ class ElMundo(BasicNewsRecipe):
cover = self.masthead_url
st = time.localtime()
year = str(st.tm_year)
- month = "%.2d" % st.tm_mon
- day = "%.2d" % st.tm_mday
+ month = '%.2d' % st.tm_mon
+ day = '%.2d' % st.tm_mday
cover = 'http://img.kiosko.net/' + year + '/' + \
month + '/' + day + '/es/elmundo.750.jpg'
try:
self.browser.open(cover)
except:
- self.log("\nPortada no disponible")
+ self.log('\nPortada no disponible')
return cover
def get_obfuscated_article(self, url):
@@ -103,7 +103,7 @@ class ElMundo(BasicNewsRecipe):
html = response.read()
count = tries
except:
- print("Retrying download...")
+ print('Retrying download...')
count += 1
if html is not None:
tfile = PersistentTemporaryFile('_fa.html')
diff --git a/recipes/elperiodico_spanish.recipe b/recipes/elperiodico_spanish.recipe
index b6c591c48f..8aab5a542f 100644
--- a/recipes/elperiodico_spanish.recipe
+++ b/recipes/elperiodico_spanish.recipe
@@ -66,7 +66,7 @@ class ElPeriodico_cat(BasicNewsRecipe):
def preprocess_html(self, soup):
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=utf-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=utf-8')])
soup.head.insert(0, mcharset)
for item in soup.findAll(style=True):
del item['style']
diff --git a/recipes/en_globes_co_il.recipe b/recipes/en_globes_co_il.recipe
index 2ad5aac6af..0dbe822972 100644
--- a/recipes/en_globes_co_il.recipe
+++ b/recipes/en_globes_co_il.recipe
@@ -18,18 +18,18 @@ class En_Globes_Recipe(BasicNewsRecipe):
max_articles_per_feed = 100
feeds = [
- (u"Main Headlines", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederNode?iID=942"),
- (u"Israeli stocks on Wall Street", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1392"),
- (u"All news", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=1725"),
- (u"Macro economics", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1389"),
- (u"Aerospace and defense", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1380"),
- (u"Real estate", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederKeyword?iID=1385"),
- (u"Energy and water", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1382"),
- (u"Start-ups and venture capital", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1397"),
- (u"Financial services", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1383"),
- (u"Tel Aviv markets", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1404"),
- (u"Healthcare", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1377"),
- (u"Telecommunications", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1386"),
- (u"Information technology", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1376"),
- (u"Transport and infrastructure", u"https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1388"),
+ (u'Main Headlines', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederNode?iID=942'),
+ (u'Israeli stocks on Wall Street', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1392'),
+ (u'All news', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=1725'),
+ (u'Macro economics', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1389'),
+ (u'Aerospace and defense', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1380'),
+ (u'Real estate', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederKeyword?iID=1385'),
+ (u'Energy and water', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1382'),
+ (u'Start-ups and venture capital', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1397'),
+ (u'Financial services', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1383'),
+ (u'Tel Aviv markets', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1404'),
+ (u'Healthcare', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1377'),
+ (u'Telecommunications', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1386'),
+ (u'Information technology', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1376'),
+ (u'Transport and infrastructure', u'https://www.globes.co.il/WebService/Rss/RssFeeder.asmx/FeederKeyword?iID=1388'),
]
diff --git a/recipes/endgadget.recipe b/recipes/endgadget.recipe
index 451df50f26..df10afd02a 100644
--- a/recipes/endgadget.recipe
+++ b/recipes/endgadget.recipe
@@ -87,8 +87,8 @@ class Engadget(BasicNewsRecipe):
except KeyError:
continue
# Reorder the "title" and "content" elements
- title_div = soup.find("div", {"class": "caas-title-wrapper"})
- content_div = soup.find("div", {"class": "caas-content-wrapper"})
+ title_div = soup.find('div', {'class': 'caas-title-wrapper'})
+ content_div = soup.find('div', {'class': 'caas-content-wrapper'})
if title_div and content_div:
soup.body.clear()
soup.body.append(title_div)
diff --git a/recipes/equestria_daily.recipe b/recipes/equestria_daily.recipe
index b0d15db6a9..63d8b48d84 100644
--- a/recipes/equestria_daily.recipe
+++ b/recipes/equestria_daily.recipe
@@ -5,12 +5,12 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1639926896(BasicNewsRecipe):
- __author__ = "Aisteru"
- __copyright__ = "2021, Timothée Andres "
+ __author__ = 'Aisteru'
+ __copyright__ = '2021, Timothée Andres '
__license__ = 'GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html'
- title = "Equestria Daily"
- description = "Everything new in Equestria and beyond!"
+ title = 'Equestria Daily'
+ description = 'Everything new in Equestria and beyond!'
language = 'en'
# Max. supported by website: 50
@@ -29,13 +29,13 @@ class AdvancedUserRecipe1639926896(BasicNewsRecipe):
# To discard posts under a certain section, simply comment the whole line
sections = [
- ("Art", 'Art'),
- ("News", 'News'),
- ("Fics", 'Fanfiction'),
- ("Media", 'Media'),
- ("Comics", 'Comic'),
- ("Community", 'Community'),
- ("Editorial", 'Editorial'),
+ ('Art', 'Art'),
+ ('News', 'News'),
+ ('Fics', 'Fanfiction'),
+ ('Media', 'Media'),
+ ('Comics', 'Comic'),
+ ('Community', 'Community'),
+ ('Editorial', 'Editorial'),
]
def get_masthead_url(self):
diff --git a/recipes/expansion_spanish.recipe b/recipes/expansion_spanish.recipe
index 812254c6c0..783bcffc49 100644
--- a/recipes/expansion_spanish.recipe
+++ b/recipes/expansion_spanish.recipe
@@ -106,15 +106,15 @@ class expansion_spanish(BasicNewsRecipe):
cover = None
st = time.localtime()
year = str(st.tm_year)
- month = "%.2d" % st.tm_mon
- day = "%.2d" % st.tm_mday
+ month = '%.2d' % st.tm_mon
+ day = '%.2d' % st.tm_mday
cover = 'http://img5.kiosko.net/' + year + '/' + \
month + '/' + day + '/es/expansion.750.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)
except:
- self.log("\nPortada no disponible")
+ self.log('\nPortada no disponible')
cover = 'http://www.aproahp.org/enlaces/images/diario_expansion.gif'
return cover
@@ -138,13 +138,13 @@ class expansion_spanish(BasicNewsRecipe):
link = article.get('link', None)
if link is None:
return article
- if link.split('/')[-1] == "story01.htm":
+ if link.split('/')[-1] == 'story01.htm':
link = link.split('/')[-2]
a = ['0B', '0C', '0D', '0E', '0F', '0G', '0N', '0L0S', '0A']
b = ['.', '/', '?', '-', '=', '&', '.com', 'www.', '0']
for i in range(0, len(a)):
link = link.replace(a[i], b[i])
- link = "http://" + link
+ link = 'http://' + link
# Eliminar artículos duplicados en otros feeds
diff --git a/recipes/fastcompany.recipe b/recipes/fastcompany.recipe
index c867c7b8ae..aeb295b973 100644
--- a/recipes/fastcompany.recipe
+++ b/recipes/fastcompany.recipe
@@ -51,9 +51,9 @@ class FastCompany(BasicNewsRecipe):
soup.html['xml:lang'] = self.lang
soup.html['lang'] = self.lang
mlang = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Language"), ("content", self.lang)])
+ ('http-equiv', 'Content-Language'), ('content', self.lang)])
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=UTF-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=UTF-8')])
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
for item in soup.findAll('a'):
diff --git a/recipes/faz_net.recipe b/recipes/faz_net.recipe
index 90b789a2c4..093b6b320b 100644
--- a/recipes/faz_net.recipe
+++ b/recipes/faz_net.recipe
@@ -21,7 +21,7 @@ def format_tickaroo_liveblog(soup):
#format liveblogs
for tag in soup.findAll('time'):
- ntag = soup.new_tag("br")
+ ntag = soup.new_tag('br')
tag.insert_before(ntag)
for tag in soup.findAll(class_ = 'tik4-author__wrapper'):
@@ -61,14 +61,14 @@ def bilderstrecke(soup,tag):
# head.append(struct[i-1])
cap = soup.new_tag('p')
cap.append(struct[int(v['caption'])])
- cap['class'] = "body-elements__image-figcaption"
+ cap['class'] = 'body-elements__image-figcaption'
if 'source' in v.keys():
cred = soup.new_tag('span')
cred.append(struct[int(v['source'])])
- cred['class'] = "body-elements__image-figcaption--source"
+ cred['class'] = 'body-elements__image-figcaption--source'
cap.append(cred)
if 'defaultUrl' in v.keys():
- fig = soup.new_tag("figure")
+ fig = soup.new_tag('figure')
img = soup.new_tag('img')
img['src'] = struct[int(v['defaultUrl'])]
fig.append(img)
@@ -145,7 +145,7 @@ class FazNet(BasicNewsRecipe):
'tik4-by','header-detail__image','mm-adbox','upper-toolbar content-container'
]}),
# dict(name ='script'),
- dict(name = "style"),
+ dict(name = 'style'),
dict(name='svg'),
dict(name='div', attrs={'data-module':'teaser'}),
@@ -215,9 +215,9 @@ class FazNet(BasicNewsRecipe):
for par in soup.findAll('p'):
if len(par.contents) == 1:
cont = str(par.contents[0])
- if re.search(r"^[1-9]\d* Bilder$",cont):
+ if re.search(r'^[1-9]\d* Bilder$',cont):
# print(cont)
- for tag in soup.findAll('script',attrs={'id':"__NUXT_DATA__",'type':'application/json'}):
+ for tag in soup.findAll('script',attrs={'id':'__NUXT_DATA__','type':'application/json'}):
bilderstrecke(soup,tag)
break
break
@@ -227,14 +227,14 @@ class FazNet(BasicNewsRecipe):
tag.unwrap()
# remove ":""
- tag = soup.find(class_ ="header-label__content")
+ tag = soup.find(class_ ='header-label__content')
if tag:
- colon=tag.find(class_ ="sr-only")
+ colon=tag.find(class_ ='sr-only')
if colon:
colon.extract()
# Skip articles behind paywall
- if soup.find(id = "faz-paywall"):
+ if soup.find(id = 'faz-paywall'):
self.abort_article('Skipping paywalled article')
# Remove F.A.Z. ad
@@ -271,5 +271,5 @@ class FazNet(BasicNewsRecipe):
text = str(tag.string)
text = text.strip()
if text != '' and text[-1] not in ['.','?','!',':']:
- tag.string.replace_with(text + ".")
+ tag.string.replace_with(text + '.')
return self.adeify_images(soup)
diff --git a/recipes/financial_times.recipe b/recipes/financial_times.recipe
index 337f6ea949..5df77f095d 100644
--- a/recipes/financial_times.recipe
+++ b/recipes/financial_times.recipe
@@ -13,7 +13,7 @@ from calibre.web.feeds.news import BasicNewsRecipe, classes
class ft(BasicNewsRecipe):
title = 'Financial Times'
language = 'en'
- __author__ = "Kovid Goyal"
+ __author__ = 'Kovid Goyal'
description = 'The Financial Times is one of the world’s leading news organisations, recognised internationally for its authority, integrity and accuracy.'
oldest_article = 1.15
max_articles_per_feed = 50
diff --git a/recipes/financialsense.recipe b/recipes/financialsense.recipe
index fdf45c45b8..6bc1bc23aa 100644
--- a/recipes/financialsense.recipe
+++ b/recipes/financialsense.recipe
@@ -22,12 +22,12 @@ class FinancialSense(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newsportal'
masthead_url = 'http://www.financialsense.com/sites/default/files/logo.jpg'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,"Helvetica Neue",Helvetica,sans-serif }
img{margin-bottom: 0.4em; display:block}
h2{color: gray}
.name{margin-right: 5em}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/first_things.recipe b/recipes/first_things.recipe
index c044afa125..17d6a18eb2 100644
--- a/recipes/first_things.recipe
+++ b/recipes/first_things.recipe
@@ -16,7 +16,7 @@ class FirstThings(BasicNewsRecipe):
title = 'First Things'
__author__ = 'John Hutson'
- description = 'America\'s Most Influential Journal of Religion and Public Life'
+ description = "America's Most Influential Journal of Religion and Public Life"
INDEX = 'https://www.firstthings.com/current-edition'
language = 'en'
encoding = 'utf-8'
diff --git a/recipes/flickr.recipe b/recipes/flickr.recipe
index 63a1b9e66f..474648932e 100644
--- a/recipes/flickr.recipe
+++ b/recipes/flickr.recipe
@@ -31,13 +31,13 @@ class AdvancedUserRecipe1297031650(BasicNewsRecipe):
remove_javascript = True
language = 'en'
- extra_css = """
+ extra_css = '''
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h2{font-family: sans-serif; font-size:130%; font-weight:bold; text-align: justify; }
.published{font-family:Arial,Helvetica,sans-serif; font-size:80%; }
.posted{font-family:Arial,Helvetica,sans-serif; font-size:80%; }
- """
+ '''
keep_only_tags = [
dict(name='div', attrs={'class': 'entry'})
diff --git a/recipes/flickr_es.recipe b/recipes/flickr_es.recipe
index 377e7f154a..b6049c7b2a 100644
--- a/recipes/flickr_es.recipe
+++ b/recipes/flickr_es.recipe
@@ -31,13 +31,13 @@ class AdvancedUserRecipe1297031650(BasicNewsRecipe):
remove_javascript = True
language = 'es'
- extra_css = """
+ extra_css = '''
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h2{font-family: sans-serif; font-size:130%; font-weight:bold; text-align: justify; }
.published{font-family:Arial,Helvetica,sans-serif; font-size:80%; }
.posted{font-family:Arial,Helvetica,sans-serif; font-size:80%; }
- """
+ '''
keep_only_tags = [
dict(name='div', attrs={'class': 'entry'})
diff --git a/recipes/fokus.recipe b/recipes/fokus.recipe
index 20321d5efe..2bcb22ba19 100644
--- a/recipes/fokus.recipe
+++ b/recipes/fokus.recipe
@@ -160,7 +160,7 @@ class Fokus(BasicNewsRecipe):
"""
def _log(article) -> None:
- """Log a digestible summary of the input `article` blurb."""
+ '''Log a digestible summary of the input `article` blurb.'''
log_message = f"\t{article['title']} : {article['date']} : {article['url']}"
if article.get('description'):
log_message += f" : {article['description']}"
@@ -224,7 +224,7 @@ class Fokus(BasicNewsRecipe):
sections: dict[str, str],
articles: dict[str, dict[str, str, str, str]],
) -> dict[str, list[dict[str, str, str, str]]]:
- """Assign each article in `articles` to a section in `sections`.
+ '''Assign each article in `articles` to a section in `sections`.
Args:
sections (dict[str, str]): A dict of section URLs as keys and section titles as values.
@@ -232,7 +232,7 @@ class Fokus(BasicNewsRecipe):
Returns:
dict[str, list[dict[str, str, str, str]]]: A dict on a `{section_title: list[article_dict]}` format.
- """
+ '''
self.log(f'Assigning each of the {len(articles)} articles to either of the {len(sections)} sections...')
section_to_articles = {}
for article_url, article_dict in articles.items():
diff --git a/recipes/folha.recipe b/recipes/folha.recipe
index 3bc31aee0e..91a9371c28 100644
--- a/recipes/folha.recipe
+++ b/recipes/folha.recipe
@@ -28,10 +28,10 @@ class Folha_de_s_paulo(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newspaper'
masthead_url = 'http://f.i.uol.com.br/fsp/furniture/images/lgo-fsp-430x50-ffffff.gif'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif }
img{margin-bottom: 0.4em; display:block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/folhadesaopaulo_sub.recipe b/recipes/folhadesaopaulo_sub.recipe
index 0b1d263cca..e9388030a2 100644
--- a/recipes/folhadesaopaulo_sub.recipe
+++ b/recipes/folhadesaopaulo_sub.recipe
@@ -49,7 +49,7 @@ class FSP(BasicNewsRecipe):
# this solves the problem with truncated content in Kindle
conversion_options = {'linearize_tables': True}
- extra_css = """
+ extra_css = '''
#articleNew { font: 18px Times New Roman,verdana,arial; }
img { background: none !important; float: none; margin: 0px; }
.newstexts { list-style-type: none; height: 20px; margin: 15px 0 10px 0; }
@@ -82,14 +82,14 @@ img { background: none !important; float: none; margin: 0px; }
.divisor { text-indent: -9999px; border-bottom: 1px solid #ccc; height: 1px; margin: 0; }
.star { background: none !important; height: 15px; }
.articleGraphic { margin-bottom: 20px; }
-"""
+'''
# This is the code for login, here a mini browser is called and id entered
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
if self.username is not None and self.password is not None:
br.open('https://login.folha.com.br/login')
- br.select_form(action="https://login.folha.com.br/login")
+ br.select_form(action='https://login.folha.com.br/login')
br['email'] = self.username
br['password'] = self.password
br.submit()
diff --git a/recipes/foreign_policy.recipe b/recipes/foreign_policy.recipe
index 89d8fc5b2d..10b969c756 100644
--- a/recipes/foreign_policy.recipe
+++ b/recipes/foreign_policy.recipe
@@ -85,7 +85,7 @@ class ForeignPolicy(BasicNewsRecipe):
if dek:
desc += ' | ' + self.tag_to_string(dek)
self.log('\t', title, url, '\n\t', desc)
- feeds_dict[current_section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[current_section].append({'title': title, 'url': url, 'description': desc})
return [(section, articles) for section, articles in feeds_dict.items()]
def preprocess_html(self, soup):
diff --git a/recipes/foreignaffairs.recipe b/recipes/foreignaffairs.recipe
index 4ca3d684f5..3d049036a8 100644
--- a/recipes/foreignaffairs.recipe
+++ b/recipes/foreignaffairs.recipe
@@ -37,20 +37,20 @@ def get_issue_data(br, log, node_id='1126213', year='2020', volnum='99', issue_v
return {
'from': 0,
'post_filter': {'bool': q},
- "_source": {
- "includes": [
- "nid", 'path', 'title', 'field_subtitle', 'field_display_authors',
+ '_source': {
+ 'includes': [
+ 'nid', 'path', 'title', 'field_subtitle', 'field_display_authors',
'fa_node_type_or_subtype',
'field_issue_sspecial_articles__nid',
'field_issue_sspecial_header'
]
},
- "query": {
- "match_all": {}
+ 'query': {
+ 'match_all': {}
},
- 'sort': [{'field_sequence': "asc"}, {'fa_normalized_date': "desc"}],
- "size": size,
+ 'sort': [{'field_sequence': 'asc'}, {'fa_normalized_date': 'desc'}],
+ 'size': size,
}
def get_data(data):
@@ -171,9 +171,9 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
cov = main.find('img', attrs={'srcset': lambda x: x and 'Cover.jpg' in x})
if cov:
self.cover_url = re.sub(
- r"_webp_issue_small_\dx",
- "_webp_issue_large_2x",
- cov["srcset"].split()[0]
+ r'_webp_issue_small_\dx',
+ '_webp_issue_large_2x',
+ cov['srcset'].split()[0]
)
cls = soup.find('link', attrs={'rel':'shortlink'})['href']
@@ -188,7 +188,7 @@ class ForeignAffairsRecipe(BasicNewsRecipe):
for by in soup.findAll(**classes('topper__byline topper__date font-style-italic')):
by.name = 'div'
for img in soup.find_all('img', attrs={'srcset': True}):
- img['src'] = re.sub(r"_webp_small_\dx", "_webp_large_1x",img['srcset'].split()[0])
+ img['src'] = re.sub(r'_webp_small_\dx', '_webp_large_1x',img['srcset'].split()[0])
return soup
def get_browser(self):
diff --git a/recipes/foxnews.recipe b/recipes/foxnews.recipe
index 402b2ea894..4949049fd2 100644
--- a/recipes/foxnews.recipe
+++ b/recipes/foxnews.recipe
@@ -25,11 +25,11 @@ class FoxNews(BasicNewsRecipe):
language = 'en_US'
remove_empty_feeds = True
- extra_css = """
+ extra_css = '''
body{font-family: Arial,sans-serif }
.caption{font-size: x-small}
.author,.dateline{font-size: small}
- """
+ '''
recipe_specific_options = {
'days': {
diff --git a/recipes/free_inquiry.recipe b/recipes/free_inquiry.recipe
index ced2c2f160..882b8f5c67 100644
--- a/recipes/free_inquiry.recipe
+++ b/recipes/free_inquiry.recipe
@@ -25,14 +25,14 @@ class FreeInquiry(BasicNewsRecipe):
ignore_duplicate_articles = {'url'}
remove_empty_feeds = True
needs_subscription = True
- extra_css = """
+ extra_css = '''
.entry-header{
text-transform: uppercase;
vertical-align: baseline;
display: inline;
}
ul li{display: inline}
- """
+ '''
remove_tags = [
classes(
diff --git a/recipes/frontline.recipe b/recipes/frontline.recipe
index f1a4d8af36..7a435ac732 100644
--- a/recipes/frontline.recipe
+++ b/recipes/frontline.recipe
@@ -95,5 +95,5 @@ class Frontline(BasicNewsRecipe):
if not url or not title:
continue
self.log(section, '\n\t', title, '\n\t', desc, '\n\t\t', url)
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
return [(section, articles) for section, articles in feeds_dict.items()]
diff --git a/recipes/galaxys_edge.recipe b/recipes/galaxys_edge.recipe
index 233277a29c..8d42d4abef 100644
--- a/recipes/galaxys_edge.recipe
+++ b/recipes/galaxys_edge.recipe
@@ -34,18 +34,18 @@ class AdvancedUserRecipe1515196393(BasicNewsRecipe):
issue_title = soup.find('h1')
self.title = "Galaxy's Edge: " + self.tag_to_string(issue_title).lower().title()
toc = soup.find('div', attrs={'class':'nav-tabs'})
- current_section = "Articles"
+ current_section = 'Articles'
current_articles = []
feeds = []
br = self.get_browser()
self.ctdir = PersistentTemporaryDirectory()
- for x in toc.findAll(['li'], attrs={"class": re.compile(".*get_content.*")}):
+ for x in toc.findAll(['li'], attrs={'class': re.compile('.*get_content.*')}):
edwo = x.find('a')
title = self.tag_to_string(edwo)
self.log('\t\tFound article:', title)
- post_id = x["data-post-id"]
- cat_id = x["data-cat-id"]
- parent_id = x["data-parent-id"]
+ post_id = x['data-post-id']
+ cat_id = x['data-cat-id']
+ parent_id = x['data-parent-id']
self.log('\t\tdata-parent-id', parent_id)
self.log('\t\tdata-cat-id', cat_id)
self.log('\t\tdata-post-id', post_id)
@@ -61,5 +61,5 @@ class AdvancedUserRecipe1515196393(BasicNewsRecipe):
return feeds
def cleanup(self):
- self.log("Deleting temp files...")
+ self.log('Deleting temp files...')
shutil.rmtree(self.ctdir)
diff --git a/recipes/gazeta-prawna-calibre-v1.recipe b/recipes/gazeta-prawna-calibre-v1.recipe
index e78a9f1861..71cca67d83 100644
--- a/recipes/gazeta-prawna-calibre-v1.recipe
+++ b/recipes/gazeta-prawna-calibre-v1.recipe
@@ -69,8 +69,8 @@ class gazetaprawna(BasicNewsRecipe):
parsed_feeds = BasicNewsRecipe.parse_feeds(self)
for n, feed in enumerate(parsed_feeds):
for a, article in enumerate(feed):
- article.text_summary = re.sub(r'<\!\[CDATA\[', "", article.text_summary)
- article.text_summary = re.sub(r'\]\]', "", article.text_summary)
+ article.text_summary = re.sub(r'<\!\[CDATA\[', '', article.text_summary)
+ article.text_summary = re.sub(r'\]\]', '', article.text_summary)
article.summary = article.text_summary
return parsed_feeds
@@ -84,10 +84,10 @@ class gazetaprawna(BasicNewsRecipe):
for span in soup.findAll(name='span'):
if len(self.tag_to_string(span)) > 1:
- span.append(" ")
+ span.append(' ')
for locked in soup.findAll(name='div', attrs={'class': ['articleGate']}):
- locked.append(u"Przejd\u017a do artyku\u0142u na GazetaPrawna.pl aby zalogowa\u0107 si\u0119 lub wykupi\u0107 dost\u0119p")
+ locked.append(u'Przejd\u017a do artyku\u0142u na GazetaPrawna.pl aby zalogowa\u0107 si\u0119 lub wykupi\u0107 dost\u0119p')
return soup
@@ -108,5 +108,5 @@ class gazetaprawna(BasicNewsRecipe):
def get_cover_url(self):
soup = self.index_to_soup(
'http://www.egazety.pl/infor/e-wydanie-dziennik-gazeta-prawna.html')
- self.cover_url = soup.find("a", {"class": "image cover-preview"}).img['src']
+ self.cover_url = soup.find('a', {'class': 'image cover-preview'}).img['src']
return getattr(self, 'cover_url', self.cover_url)
diff --git a/recipes/globes_co_il.recipe b/recipes/globes_co_il.recipe
index 627801bce6..8102642749 100644
--- a/recipes/globes_co_il.recipe
+++ b/recipes/globes_co_il.recipe
@@ -21,21 +21,21 @@ class AdvancedUserRecipe1283848012(BasicNewsRecipe):
remove_attributes = ['width', 'style']
feeds = [
- (u"עידכוני RSS ", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3038"),
- (u"כל הכתבות", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=2"),
- (u"שוק ההון", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=585"),
- (u"בארץ", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=9917"),
- (u"גלובלי ושוקי עולם", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=1225"),
- (u"גלובסטק", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=594"),
- (u"דין וחשבון", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=829"),
- (u"דעות", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=845"),
- (u"וידאו", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=2007"),
- (u"ליידי גלובס", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3314"),
- (u"מגזין G", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3312"),
- (u"nadlan", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=607"),
- (u"נתח שוק וצרכנות", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=821"),
- (u"מטבעות דיגיטליים", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=9758"),
- (u"קריירה", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iid=3266"),
- (u"תיירות", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iid=9010"),
- (u"רכב", u"https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3220")
+ (u'עידכוני RSS ', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3038'),
+ (u'כל הכתבות', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=2'),
+ (u'שוק ההון', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=585'),
+ (u'בארץ', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=9917'),
+ (u'גלובלי ושוקי עולם', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=1225'),
+ (u'גלובסטק', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=594'),
+ (u'דין וחשבון', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=829'),
+ (u'דעות', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=845'),
+ (u'וידאו', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=2007'),
+ (u'ליידי גלובס', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3314'),
+ (u'מגזין G', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3312'),
+ (u'nadlan', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=607'),
+ (u'נתח שוק וצרכנות', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=821'),
+ (u'מטבעות דיגיטליים', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=9758'),
+ (u'קריירה', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iid=3266'),
+ (u'תיירות', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iid=9010'),
+ (u'רכב', u'https://www.globes.co.il/webservice/rss/rssfeeder.asmx/FeederNode?iID=3220')
]
diff --git a/recipes/go_comics.recipe b/recipes/go_comics.recipe
index 57c23d87e1..fbd79f714a 100644
--- a/recipes/go_comics.recipe
+++ b/recipes/go_comics.recipe
@@ -61,7 +61,7 @@ class GoComics(BasicNewsRecipe):
# (u"Andertoons",u"http://www.gocomics.com/andertoons"),
# (u"Andy Capp",u"http://www.gocomics.com/andycapp"),
# (u"Angry Little Girls",u"http://www.gocomics.com/angry-little-girls"),
- (u"Animal Crackers", u"http://www.gocomics.com/animalcrackers"),
+ (u'Animal Crackers', u'http://www.gocomics.com/animalcrackers'),
# (u"Annie",u"http://www.gocomics.com/annie"),
# (u"The Argyle Sweater",u"http://www.gocomics.com/theargylesweater"),
# (u"Robert Ariail",u"http://www.gocomics.com/robert-ariail"),
@@ -71,14 +71,14 @@ class GoComics(BasicNewsRecipe):
# (u"At the Zoo",u"http://www.gocomics.com/at-the-zoo"),
# (u"Aunty Acid",u"http://www.gocomics.com/aunty-acid"),
# (u"The Awkward Yeti",u"http://www.gocomics.com/the-awkward-yeti"),
- (u"B.C.",u"http://www.gocomics.com/bc"),
+ (u'B.C.',u'http://www.gocomics.com/bc'),
# (u"Back to B.C.",u"http://www.gocomics.com/back-to-bc"),
# (u"Back in the Day",u"http://www.gocomics.com/backintheday"),
# (u"bacon",u"http://www.gocomics.com/bacon"),
# (u"Bad Machinery",u"http://www.gocomics.com/bad-machinery"),
# (u"Bad Reporter",u"http://www.gocomics.com/badreporter"),
# (u"Badlands",u"http://www.gocomics.com/badlands"),
- (u"Baldo",u"http://www.gocomics.com/baldo"),
+ (u'Baldo',u'http://www.gocomics.com/baldo'),
# (u"Ballard Street",u"http://www.gocomics.com/ballardstreet"),
# (u"Banana Triangle",u"http://www.gocomics.com/banana-triangle"),
# (u"Barkeater Lake Pandolph",u"http://www.gocomics.com/barkeaterlake"),
@@ -115,10 +115,10 @@ class GoComics(BasicNewsRecipe):
# (u"Chip Bok",u"http://www.gocomics.com/chipbok"),
# (u"Boomerangs",u"http://www.gocomics.com/boomerangs"),
# (u"The Boondocks",u"http://www.gocomics.com/boondocks"),
- (u"The Born Loser",u"http://www.gocomics.com/the-born-loser"),
+ (u'The Born Loser',u'http://www.gocomics.com/the-born-loser'),
# (u"Matt Bors",u"http://www.gocomics.com/matt-bors"),
# (u"Bottomliners",u"http://www.gocomics.com/bottomliners"),
- (u"Bound and Gagged",u"http://www.gocomics.com/boundandgagged"),
+ (u'Bound and Gagged',u'http://www.gocomics.com/boundandgagged'),
# (u"Brain Squirts",u"http://www.gocomics.com/brain-squirts"),
# (u"Break of Day",u"http://www.gocomics.com/break-of-day"),
# (u"Breaking Cat News",u"http://www.gocomics.com/breaking-cat-news"),
@@ -126,12 +126,12 @@ class GoComics(BasicNewsRecipe):
# (u"Brevity",u"http://www.gocomics.com/brevitypanel"),
# (u"Brewster Rockit",u"http://www.gocomics.com/brewsterrockit"),
# (u"Chris Britt",u"http://www.gocomics.com/chrisbritt"),
- (u"Broom Hilda",u"http://www.gocomics.com/broomhilda"),
+ (u'Broom Hilda',u'http://www.gocomics.com/broomhilda'),
# (u"The Buckets",u"http://www.gocomics.com/thebuckets"),
# (u"Bully",u"http://www.gocomics.com/bully"),
# (u"Buni",u"http://www.gocomics.com/buni"),
# (u"Bushy Tales",u"http://www.gocomics.com/bushy-tales"),
- (u"Calvin and Hobbes",u"http://www.gocomics.com/calvinandhobbes"),
+ (u'Calvin and Hobbes',u'http://www.gocomics.com/calvinandhobbes'),
# (u"Candorville",u"http://www.gocomics.com/candorville"),
# (u"Stuart Carlson",u"http://www.gocomics.com/stuartcarlson"),
# (u"Ken Catalino",u"http://www.gocomics.com/kencatalino"),
@@ -202,7 +202,7 @@ class GoComics(BasicNewsRecipe):
# (u"Flo and Friends",u"http://www.gocomics.com/floandfriends"),
# (u"The Flying McCoys",u"http://www.gocomics.com/theflyingmccoys"),
# (u"Foolish Mortals",u"http://www.gocomics.com/foolish-mortals"),
- (u"For Better or For Worse",u"http://www.gocomics.com/forbetterorforworse"),
+ (u'For Better or For Worse',u'http://www.gocomics.com/forbetterorforworse'),
# (u"For Heaven's Sake",u"http://www.gocomics.com/forheavenssake"),
# (u"Fort Knox",u"http://www.gocomics.com/fortknox"),
# (u"Four Eyes",u"http://www.gocomics.com/four-eyes"),
@@ -210,7 +210,7 @@ class GoComics(BasicNewsRecipe):
# (u"FoxTrot",u"http://www.gocomics.com/foxtrot"),
# (u"FoxTrot Classics",u"http://www.gocomics.com/foxtrotclassics"),
# (u"Francis",u"http://www.gocomics.com/francis"),
- (u"Frank and Ernest",u"http://www.gocomics.com/frank-and-ernest"),
+ (u'Frank and Ernest',u'http://www.gocomics.com/frank-and-ernest'),
# (u"Frankie Comics",u"http://www.gocomics.com/frankie-comics"),
# (u"Frazz",u"http://www.gocomics.com/frazz"),
# (u"Fred Basset",u"http://www.gocomics.com/fredbasset"),
@@ -219,7 +219,7 @@ class GoComics(BasicNewsRecipe):
# (u"Frog Applause",u"http://www.gocomics.com/frogapplause"),
# (u"From the Mo Willems Sketchbook",u"http://www.gocomics.com/from-the-mo-willems-sketchbook"),
# (u"The Fusco Brothers",u"http://www.gocomics.com/thefuscobrothers"),
- (u"Garfield",u"http://www.gocomics.com/garfield"),
+ (u'Garfield',u'http://www.gocomics.com/garfield'),
# (u"Garfield Classics",u"http://www.gocomics.com/garfield-classics"),
# (u"Garfield Minus Garfield",u"http://www.gocomics.com/garfieldminusgarfield"),
# (u"Gasoline Alley",u"http://www.gocomics.com/gasolinealley"),
@@ -227,7 +227,7 @@ class GoComics(BasicNewsRecipe):
# (u"Gentle Creatures",u"http://www.gocomics.com/gentle-creatures"),
# (u"The Gentleman's Armchair",u"http://www.gocomics.com/the-gentlemans-armchair"),
# (u"Get a Life",u"http://www.gocomics.com/getalife"),
- (u"Get Fuzzy",u"http://www.gocomics.com/getfuzzy"),
+ (u'Get Fuzzy',u'http://www.gocomics.com/getfuzzy'),
# (u"Gil",u"http://www.gocomics.com/gil"),
# (u"Gil Thorp",u"http://www.gocomics.com/gilthorp"),
# (u"Ginger Meggs",u"http://www.gocomics.com/gingermeggs"),
@@ -248,7 +248,7 @@ class GoComics(BasicNewsRecipe):
# (u"Phil Hands",u"http://www.gocomics.com/phil-hands"),
# (u"Health Capsules",u"http://www.gocomics.com/healthcapsules"),
# (u"Heart of the City",u"http://www.gocomics.com/heartofthecity"),
- (u"Heathcliff",u"http://www.gocomics.com/heathcliff"),
+ (u'Heathcliff',u'http://www.gocomics.com/heathcliff'),
# (u"Joe Heller",u"http://www.gocomics.com/joe-heller"),
# (u"Rebecca Hendin",u"http://www.gocomics.com/rebecca-hendin"),
# (u"Herb and Jamaal",u"http://www.gocomics.com/herbandjamaal"),
@@ -313,7 +313,7 @@ class GoComics(BasicNewsRecipe):
# (u"Lost Side of Suburbia",u"http://www.gocomics.com/lostsideofsuburbia"),
# (u"Lost Sheep",u"http://www.gocomics.com/lostsheep"),
# (u"Chan Lowe",u"http://www.gocomics.com/chanlowe"),
- (u"Luann",u"http://www.gocomics.com/luann"),
+ (u'Luann',u'http://www.gocomics.com/luann'),
# (u"Luann Againn",u"http://www.gocomics.com/luann-againn"),
# (u"Mike Luckovich",u"http://www.gocomics.com/mikeluckovich"),
# (u"Lucky Cow",u"http://www.gocomics.com/luckycow"),
@@ -326,7 +326,7 @@ class GoComics(BasicNewsRecipe):
# (u"Making It",u"http://www.gocomics.com/making-it"),
# (u"Maria's Day",u"http://www.gocomics.com/marias-day"),
# (u"Gary Markstein",u"http://www.gocomics.com/garymarkstein"),
- (u"Marmaduke",u"http://www.gocomics.com/marmaduke"),
+ (u'Marmaduke',u'http://www.gocomics.com/marmaduke'),
# (u"The Martian Confederacy",u"http://www.gocomics.com/the-martian-confederacy"),
# (u"MazeToons Puzzle",u"http://www.gocomics.com/mazetoons-puzzle"),
# (u"Glenn McCoy",u"http://www.gocomics.com/glennmccoy"),
@@ -335,13 +335,13 @@ class GoComics(BasicNewsRecipe):
# (u"Medium Large",u"http://www.gocomics.com/medium-large"),
# (u"Meg Classics",u"http://www.gocomics.com/meg-classics"),
# (u"Microcosm",u"http://www.gocomics.com/microcosm"),
- (u"The Middletons",u"http://www.gocomics.com/themiddletons"),
+ (u'The Middletons',u'http://www.gocomics.com/themiddletons'),
# (u"Mike du Jour",u"http://www.gocomics.com/mike-du-jour"),
# (u"Minimum Security",u"http://www.gocomics.com/minimumsecurity"),
# (u"Moderately Confused",u"http://www.gocomics.com/moderately-confused"),
# (u"Molebashed",u"http://www.gocomics.com/molebashed"),
# (u"Molly and the Bear",u"http://www.gocomics.com/mollyandthebear"),
- (u"Momma",u"http://www.gocomics.com/momma"),
+ (u'Momma',u'http://www.gocomics.com/momma'),
# (u"Mom's Cancer",u"http://www.gocomics.com/moms-cancer"),
# (u"Monty",u"http://www.gocomics.com/monty"),
# (u"Jim Morin",u"http://www.gocomics.com/jimmorin"),
@@ -359,7 +359,7 @@ class GoComics(BasicNewsRecipe):
# (u"New Adventures of Queen Victoria",u"http://www.gocomics.com/thenewadventuresofqueenvictoria"),
# (u"Next Door Neighbors",u"http://www.gocomics.com/next-door-neighbors"),
# (u"Nick and Zuzu",u"http://www.gocomics.com/nick-and-zuzu),
- (u"Non Sequitur",u"http://www.gocomics.com/nonsequitur"),
+ (u'Non Sequitur',u'http://www.gocomics.com/nonsequitur'),
# (u"The Norm 4.0",u"http://www.gocomics.com/the-norm-4-0"),
# (u"The Norm Classics",u"http://www.gocomics.com/thenorm"),
# (u"Not Invented Here",u"http://www.gocomics.com/not-invented-here"),
@@ -383,10 +383,10 @@ class GoComics(BasicNewsRecipe):
# (u"Ozy and Millie",u"http://www.gocomics.com/ozy-and-millie"),
# (u"Henry Payne",u"http://www.gocomics.com/henrypayne"),
# (u"PC and Pixel",u"http://www.gocomics.com/pcandpixel"),
- (u"Peanuts",u"http://www.gocomics.com/peanuts"),
+ (u'Peanuts',u'http://www.gocomics.com/peanuts'),
# (u"Peanuts Begins",u"http://www.gocomics.com/peanuts-begins"),
# (u"Peanuts Holiday Countdown",u"http://www.gocomics.com/peanuts-holiday-countdown"),
- (u"Pearls Before Swine",u"http://www.gocomics.com/pearlsbeforeswine"),
+ (u'Pearls Before Swine',u'http://www.gocomics.com/pearlsbeforeswine'),
# (u"Perry Bible Fellowship",u"http://www.gocomics.com/perry-bible-fellowship"),
# (u"Joel Pett",u"http://www.gocomics.com/joelpett"),
# (u"Phoebe and Her Unicorn",u"http://www.gocomics.com/phoebe-and-her-unicorn"),
@@ -398,7 +398,7 @@ class GoComics(BasicNewsRecipe):
# (u"Pinkerton",u"http://www.gocomics.com/pinkerton"),
# (u"Please Listen to Me",u"http://www.gocomics.com/please-listen-to-me"),
# (u"Pluggers",u"http://www.gocomics.com/pluggers"),
- (u"Pooch Cafe",u"http://www.gocomics.com/poochcafe"),
+ (u'Pooch Cafe',u'http://www.gocomics.com/poochcafe'),
# (u"Poorcraft",u"http://www.gocomics.com/poorcraft"),
# (u"Poorly Drawn Lines",u"http://www.gocomics.com/poorly-drawn-lines"),
# (u"Pop Culture Shock Therapy",u"http://www.gocomics.com/pop-culture-shock-therapy"),
@@ -427,7 +427,7 @@ class GoComics(BasicNewsRecipe):
# (u"Ripley's Believe It or Not",u"http://www.gocomics.com/ripleysbelieveitornot"),
# (u"Robbie and Bobby",u"http://www.gocomics.com/robbie-and-bobby"),
# (u"Rob Rogers",u"http://www.gocomics.com/robrogers"),
- (u"Rose is Rose",u"http://www.gocomics.com/roseisrose"),
+ (u'Rose is Rose',u'http://www.gocomics.com/roseisrose'),
# (u"Rubes",u"http://www.gocomics.com/rubes"),
# (u"Rudy Park",u"http://www.gocomics.com/rudypark"),
# (u"Sarah's Scribbles",u"http://www.gocomics.com/sarahs-scribbles"),
@@ -438,7 +438,7 @@ class GoComics(BasicNewsRecipe):
# (u"Sheldon",u"http://www.gocomics.com/sheldon"),
# (u"Drew Sheneman",u"http://www.gocomics.com/drewsheneman"),
# (u"Shirley and Son Classics",u"http://www.gocomics.com/shirley-and-son-classics"),
- (u"Shoe",u"http://www.gocomics.com/shoe"),
+ (u'Shoe',u'http://www.gocomics.com/shoe'),
# (u"Shoecabbage",u"http://www.gocomics.com/shoecabbage"),
# (u"Shortcuts",u"http://www.gocomics.com/shortcuts"),
# (u"Shutterbug Follies",u"http://www.gocomics.com/shutterbug-follies"),
@@ -524,7 +524,7 @@ class GoComics(BasicNewsRecipe):
# (u"Winston",u"http://www.gocomics.com/winston"),
# (u"Wit of the World",u"http://www.gocomics.com/witoftheworld"),
# (u"CartoonArts International",u"http://www.gocomics.com/witoftheworld"),
- (u"Wizard of Id",u"http://www.gocomics.com/wizardofid"),
+ (u'Wizard of Id',u'http://www.gocomics.com/wizardofid'),
# (u"Wizard of Id Classics",u"http://www.gocomics.com/wizard-of-id-classics"),
# (u"Wondermark",u"http://www.gocomics.com/wondermark"),
# (u"Working Daze",u"http://www.gocomics.com/working-daze"),
diff --git a/recipes/google_news.recipe b/recipes/google_news.recipe
index 7eca31974a..9777951626 100644
--- a/recipes/google_news.recipe
+++ b/recipes/google_news.recipe
@@ -68,7 +68,7 @@ class google_news_de(BasicNewsRecipe):
# feel free to add, wipe out what you need ---- can be edit by user
#
def get_feeds(self):
- url = "https://geolocation-db.com/json"
+ url = 'https://geolocation-db.com/json'
data = self.index_to_soup(url, raw=True)
data = json.loads(data)
country_code = str(data['country_code']).lower() # for me this is de
diff --git a/recipes/gosc_full.recipe b/recipes/gosc_full.recipe
index 2f1752f5be..26d05d39eb 100644
--- a/recipes/gosc_full.recipe
+++ b/recipes/gosc_full.recipe
@@ -32,7 +32,7 @@ class GN(BasicNewsRecipe):
page = doc.xpath(
'//div[@class="search-result release-result"]/div[1]/div[1]/a/@href')
- if time.strftime("%w") in ['3', '4']:
+ if time.strftime('%w') in ['3', '4']:
return page[5]
else:
return page[4]
diff --git a/recipes/granta.recipe b/recipes/granta.recipe
index 910e0a2cf5..989d4d9700 100644
--- a/recipes/granta.recipe
+++ b/recipes/granta.recipe
@@ -138,14 +138,14 @@ Magnitude = {
def text2num(s):
- a = re.split(r"[\s-]+", s)
+ a = re.split(r'[\s-]+', s)
n = 0
g = 0
for w in a:
x = Small.get(w, None)
if x is not None:
g += x
- elif w == "hundred" and g != 0:
+ elif w == 'hundred' and g != 0:
g *= 100
else:
x = Magnitude.get(w, None)
@@ -195,7 +195,7 @@ class Granta(BasicNewsRecipe):
if captcha_question is not None:
captcha = str(solve_captcha(captcha_question))
- br.select_form(method="post", action="https://granta.com/")
+ br.select_form(method='post', action='https://granta.com/')
br['username'] = self.username
br['password'] = self.password
br['capcha'] = captcha
diff --git a/recipes/grantland.recipe b/recipes/grantland.recipe
index c0d400e84f..ca08c6a03f 100644
--- a/recipes/grantland.recipe
+++ b/recipes/grantland.recipe
@@ -4,7 +4,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class GrantLand(BasicNewsRecipe):
- title = u"Grantland"
+ title = u'Grantland'
description = 'Writings on Sports & Pop Culture'
language = 'en'
__author__ = 'barty on mobileread.com forum'
@@ -51,7 +51,7 @@ class GrantLand(BasicNewsRecipe):
self.log('Reading category:', cat_name)
articles = []
- page = "%s/%s" % (self.INDEX, tag)
+ page = '%s/%s' % (self.INDEX, tag)
soup = self.index_to_soup(page)
main = soup.find('div', id='col-main')
diff --git a/recipes/greensboro_news_and_record.recipe b/recipes/greensboro_news_and_record.recipe
index 1056b97743..1e04fb8864 100644
--- a/recipes/greensboro_news_and_record.recipe
+++ b/recipes/greensboro_news_and_record.recipe
@@ -8,7 +8,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class NewsandRecord(BasicNewsRecipe):
title = u'Greensboro News & Record'
- description = "News from Greensboro, North Carolina"
+ description = 'News from Greensboro, North Carolina'
__author__ = 'Walt Anthony'
publisher = 'News & Record and Landmark Media Enterprises, LLC'
category = 'news, USA'
diff --git a/recipes/guardian.recipe b/recipes/guardian.recipe
index cf6df01729..9e05967e4a 100644
--- a/recipes/guardian.recipe
+++ b/recipes/guardian.recipe
@@ -22,10 +22,10 @@ class Guardian(BasicNewsRecipe):
title = u'The Guardian and The Observer'
is_observer = False
- base_url = "https://www.theguardian.com/uk"
+ base_url = 'https://www.theguardian.com/uk'
if date.today().weekday() == 6:
is_observer = True
- base_url = "https://www.theguardian.com/observer"
+ base_url = 'https://www.theguardian.com/observer'
__author__ = 'Kovid Goyal'
language = 'en_GB'
@@ -64,7 +64,7 @@ class Guardian(BasicNewsRecipe):
classes('content__article-body js-bottom-marker article-body-commercial-selector'),
]
- extra_css = """
+ extra_css = '''
img {
max-width: 100% !important;
max-height: 100% !important;
@@ -78,7 +78,7 @@ class Guardian(BasicNewsRecipe):
font-size: 0.5em;
color: #6B6B6B;
}
- """
+ '''
def get_browser(self, *a, **kw):
# This site returns images in JPEG-XR format if the user agent is IE
diff --git a/recipes/haaretz_en.recipe b/recipes/haaretz_en.recipe
index 525194c5d4..951687c1f7 100644
--- a/recipes/haaretz_en.recipe
+++ b/recipes/haaretz_en.recipe
@@ -33,13 +33,13 @@ class Haaretz_en(BasicNewsRecipe):
PREFIX = 'https://www.haaretz.com'
LOGIN = 'https://services.haaretz.com/ms-sso/loginUrlEncoded'
LOGOUT = 'https://services.haaretz.com/ms-sso/logout'
- extra_css = """
+ extra_css = '''
body{font-family: Merriweather, "Helvetica Neue", Helvetica, Arial, sans-serif }
div.mx time{display: none}
div.my time{display: none}
div.mq time{display: none}
div.mr time{display: none}
- """
+ '''
conversion_options = {
'comment': description, 'publisher': publisher, 'language': language
diff --git a/recipes/hankyoreh21.recipe b/recipes/hankyoreh21.recipe
index 3113df68e7..f89476c9f9 100644
--- a/recipes/hankyoreh21.recipe
+++ b/recipes/hankyoreh21.recipe
@@ -36,4 +36,4 @@ class Hankyoreh21(BasicNewsRecipe):
def get_article_url(self, article):
org_url = BasicNewsRecipe.get_article_url(self, article)
- return "http://h21.hani.co.kr" + org_url if org_url[0] == '/' else org_url
+ return 'http://h21.hani.co.kr' + org_url if org_url[0] == '/' else org_url
diff --git a/recipes/harpers.recipe b/recipes/harpers.recipe
index ee83add22c..c372d55fdc 100644
--- a/recipes/harpers.recipe
+++ b/recipes/harpers.recipe
@@ -35,7 +35,7 @@ class Harpers(BasicNewsRecipe):
remove_tags = [
classes('header-controls')
]
- remove_attributes = ["style", "width", "height"]
+ remove_attributes = ['style', 'width', 'height']
extra_css = '''
img {display:block; margin:0 auto;}
@@ -64,8 +64,8 @@ class Harpers(BasicNewsRecipe):
}
def parse_index(self):
- issues_soup = self.index_to_soup("https://harpers.org/issues/")
- a_ele = issues_soup.select_one("div.issue-card a")
+ issues_soup = self.index_to_soup('https://harpers.org/issues/')
+ a_ele = issues_soup.select_one('div.issue-card a')
self.timefmt = ' [' + self.tag_to_string(a_ele.find(attrs={'class':'issue-title'})) + ']'
url = a_ele['href']
diff --git a/recipes/hbr.recipe b/recipes/hbr.recipe
index 4dcde07c4e..8b326a18af 100644
--- a/recipes/hbr.recipe
+++ b/recipes/hbr.recipe
@@ -12,28 +12,28 @@ from calibre.web.feeds.news import BasicNewsRecipe, classes
class HBR(BasicNewsRecipe):
- title = "Harvard Business Review"
- __author__ = "unkn0wn, updated by ping"
+ title = 'Harvard Business Review'
+ __author__ = 'unkn0wn, updated by ping'
description = (
- "Harvard Business Review is the leading destination for smart management thinking. "
- "Through its flagship magazine, books, and digital content and tools published on HBR.org, "
- "Harvard Business Review aims to provide professionals around the world with rigorous insights "
- "and best practices to help lead themselves and their organizations more effectively and to "
- "make a positive impact."
+ 'Harvard Business Review is the leading destination for smart management thinking. '
+ 'Through its flagship magazine, books, and digital content and tools published on HBR.org, '
+ 'Harvard Business Review aims to provide professionals around the world with rigorous insights '
+ 'and best practices to help lead themselves and their organizations more effectively and to '
+ 'make a positive impact.'
)
- language = "en"
- masthead_url = "https://hbr.org/resources/css/images/hbr_logo.svg"
- publication_type = "magazine"
- encoding = "utf-8"
+ language = 'en'
+ masthead_url = 'https://hbr.org/resources/css/images/hbr_logo.svg'
+ publication_type = 'magazine'
+ encoding = 'utf-8'
remove_javascript = True
no_stylesheets = True
auto_cleanup = False
compress_news_images = True
- ignore_duplicate_articles = {"url"}
- base_url = "https://hbr.org"
+ ignore_duplicate_articles = {'url'}
+ base_url = 'https://hbr.org'
- remove_attributes = ["height", "width", "style"]
- extra_css = """
+ remove_attributes = ['height', 'width', 'style']
+ extra_css = '''
h1.article-hed { font-size: x-large; margin-bottom: 0.4rem; }
.article-dek { font-size: large; font-style: italic; margin-bottom: 1rem; }
.article-byline { margin-top: 0.7rem; font-size: medium; font-style: normal; font-weight: bold; }
@@ -50,35 +50,35 @@ class HBR(BasicNewsRecipe):
padding-top: 0.5rem;
font-style: italic;
}
- """
+ '''
keep_only_tags = [
classes(
- "headline-container article-dek-group pub-date hero-image-content "
- "article-body standard-content"
+ 'headline-container article-dek-group pub-date hero-image-content '
+ 'article-body standard-content'
),
]
remove_tags = [
classes(
- "left-rail--container translate-message follow-topic "
- "newsletter-container by-prefix related-topics--common"
+ 'left-rail--container translate-message follow-topic '
+ 'newsletter-container by-prefix related-topics--common'
),
- dict(name=["article-sidebar"]),
+ dict(name=['article-sidebar']),
]
def preprocess_raw_html(self, raw_html, article_url):
soup = self.soup(raw_html)
# break author byline out of list
- byline_list = soup.find("ul", class_="article-byline-list")
+ byline_list = soup.find('ul', class_='article-byline-list')
if byline_list:
byline = byline_list.parent
byline.append(
- ", ".join(
+ ', '.join(
[
self.tag_to_string(author)
- for author in byline_list.find_all(class_="article-author")
+ for author in byline_list.find_all(class_='article-author')
]
)
)
@@ -86,44 +86,44 @@ class HBR(BasicNewsRecipe):
# Extract full article content
content_ele = soup.find(
- "content",
+ 'content',
attrs={
- "data-index": True,
- "data-page-year": True,
- "data-page-month": True,
- "data-page-seo-title": True,
- "data-page-slug": True,
+ 'data-index': True,
+ 'data-page-year': True,
+ 'data-page-month': True,
+ 'data-page-seo-title': True,
+ 'data-page-slug': True,
},
)
- endpoint_url = "https://hbr.org/api/article/piano/content?" + urlencode(
+ endpoint_url = 'https://hbr.org/api/article/piano/content?' + urlencode(
{
- "year": content_ele["data-page-year"],
- "month": content_ele["data-page-month"],
- "seotitle": content_ele["data-page-seo-title"],
+ 'year': content_ele['data-page-year'],
+ 'month': content_ele['data-page-month'],
+ 'seotitle': content_ele['data-page-seo-title'],
}
)
data = {
- "contentKey": content_ele["data-index"],
- "pageSlug": content_ele["data-page-slug"],
+ 'contentKey': content_ele['data-index'],
+ 'pageSlug': content_ele['data-page-slug'],
}
headers = {
- "User-Agent": random_user_agent(),
- "Pragma": "no-cache",
- "Cache-Control": "no-cache",
- "Content-Type": "application/json",
- "Referer": article_url,
+ 'User-Agent': random_user_agent(),
+ 'Pragma': 'no-cache',
+ 'Cache-Control': 'no-cache',
+ 'Content-Type': 'application/json',
+ 'Referer': article_url,
}
br = browser()
req = Request(
endpoint_url,
headers=headers,
data=json.dumps(data),
- method="POST",
+ method='POST',
timeout=self.timeout,
)
res = br.open(req)
article = json.loads(res.read())
- new_soup = self.soup(article["content"])
+ new_soup = self.soup(article['content'])
# clear out existing partial content
for c in list(content_ele.children):
c.extract() # use extract() instead of decompose() because of strings
@@ -140,52 +140,52 @@ class HBR(BasicNewsRecipe):
def parse_index(self):
d = self.recipe_specific_options.get('issue')
if not (d and isinstance(d, str)):
- soup = self.index_to_soup(f"{self.base_url}/magazine")
- a = soup.find("a", href=lambda x: x and x.startswith("/archive-toc/"))
- cov_url = a.find("img", attrs={"src": True})["src"]
+ soup = self.index_to_soup(f'{self.base_url}/magazine')
+ a = soup.find('a', href=lambda x: x and x.startswith('/archive-toc/'))
+ cov_url = a.find('img', attrs={'src': True})['src']
self.cover_url = urljoin(self.base_url, cov_url)
- issue_url = urljoin(self.base_url, a["href"])
+ issue_url = urljoin(self.base_url, a['href'])
else:
issue_url = 'https://hbr.org/archive-toc/BR' + d
- mobj = re.search(r"archive-toc/(?P<issue>(BR)?\d+)\b", issue_url)
+ mobj = re.search(r'archive-toc/(?P<issue>(BR)?\d+)\b', issue_url)
if mobj:
self.cover_url = f'https://hbr.org/resources/images/covers/{mobj.group("issue")}_500.png'
- self.log("Downloading issue:", issue_url)
+ self.log('Downloading issue:', issue_url)
soup = self.index_to_soup(issue_url)
- issue_title = soup.find("h1")
+ issue_title = soup.find('h1')
if issue_title:
- self.timefmt = f" [{self.tag_to_string(issue_title)}]"
+ self.timefmt = f' [{self.tag_to_string(issue_title)}]'
feeds = OrderedDict()
- for h3 in soup.find_all("h3", attrs={"class": "hed"}):
- article_link_ele = h3.find("a")
+ for h3 in soup.find_all('h3', attrs={'class': 'hed'}):
+ article_link_ele = h3.find('a')
if not article_link_ele:
continue
article_ele = h3.find_next_sibling(
- "div", attrs={"class": "stream-item-info"}
+ 'div', attrs={'class': 'stream-item-info'}
)
if not article_ele:
continue
title = self.tag_to_string(article_link_ele)
- url = urljoin(self.base_url, article_link_ele["href"])
+ url = urljoin(self.base_url, article_link_ele['href'])
- authors_ele = article_ele.select("ul.byline li")
- authors = ", ".join([self.tag_to_string(a) for a in authors_ele])
+ authors_ele = article_ele.select('ul.byline li')
+ authors = ', '.join([self.tag_to_string(a) for a in authors_ele])
- article_desc = ""
- dek_ele = h3.find_next_sibling("div", attrs={"class": "dek"})
+ article_desc = ''
+ dek_ele = h3.find_next_sibling('div', attrs={'class': 'dek'})
if dek_ele:
- article_desc = self.tag_to_string(dek_ele) + " | " + authors
+ article_desc = self.tag_to_string(dek_ele) + ' | ' + authors
section_ele = (
- h3.findParent("li")
- .find_previous_sibling("div", **classes("stream-section-label"))
- .find("h4")
+ h3.findParent('li')
+ .find_previous_sibling('div', **classes('stream-section-label'))
+ .find('h4')
)
section_title = self.tag_to_string(section_ele).title()
feeds.setdefault(section_title, []).append(
- {"title": title, "url": url, "description": article_desc}
+ {'title': title, 'url': url, 'description': article_desc}
)
return feeds.items()
diff --git a/recipes/heise.recipe b/recipes/heise.recipe
index 886fad31cf..cae53ef938 100644
--- a/recipes/heise.recipe
+++ b/recipes/heise.recipe
@@ -68,4 +68,4 @@ class heiseDe(BasicNewsRecipe):
]
def get_article_url(self, article):
- return article.link + "&view=print"
+ return article.link + '&view=print'
diff --git a/recipes/heise_ct.recipe b/recipes/heise_ct.recipe
index 66ec19a02b..c48c29f9a6 100644
--- a/recipes/heise_ct.recipe
+++ b/recipes/heise_ct.recipe
@@ -108,7 +108,7 @@ class heise_select(BasicNewsRecipe):
img = soup.new_tag('img',
src=aimg['href'],
alt=aimg['data-pswp-bu'],
- style="display: block;")
+ style='display: block;')
if img is not None:
aimg.replaceWith(img)
diff --git a/recipes/heise_ix.recipe b/recipes/heise_ix.recipe
index dadbf1ac94..d4d7fcd1b3 100644
--- a/recipes/heise_ix.recipe
+++ b/recipes/heise_ix.recipe
@@ -109,7 +109,7 @@ class heise_select(BasicNewsRecipe):
'img',
src=aimg['href'],
alt=aimg['data-pswp-bu'],
- style="display: block;"
+ style='display: block;'
)
if img is not None:
aimg.replaceWith(img)
diff --git a/recipes/hindu.recipe b/recipes/hindu.recipe
index c1719195f8..b49539803a 100644
--- a/recipes/hindu.recipe
+++ b/recipes/hindu.recipe
@@ -17,7 +17,7 @@ def absurl(url):
class TheHindu(BasicNewsRecipe):
title = 'The Hindu'
__author__ = 'unkn0wn'
- description = 'Articles from The Hindu, Today\'s Paper.'
+ description = "Articles from The Hindu, Today's Paper."
language = 'en_IN'
no_stylesheets = True
masthead_url = 'https://www.thehindu.com/theme/images/th-online/thehindu-logo.svg'
@@ -133,7 +133,7 @@ class TheHindu(BasicNewsRecipe):
url = absurl(item['href'])
desc = 'Page no.' + item['pageno'] + ' | ' + item['teaser_text'] or ''
self.log(' ', title, '\n\t', url)
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
return [(section, articles) for section, articles in feeds_dict.items()]
else:
return []
diff --git a/recipes/hindustan_times_print.recipe b/recipes/hindustan_times_print.recipe
index e90d28a446..9bf5d34f39 100644
--- a/recipes/hindustan_times_print.recipe
+++ b/recipes/hindustan_times_print.recipe
@@ -97,7 +97,7 @@ class ht(BasicNewsRecipe):
continue
desc = page_no
self.log('\t', title, ' ', desc)
- feeds_dict[section].append({"title": title, "description": desc, "url": url})
+ feeds_dict[section].append({'title': title, 'description': desc, 'url': url})
return [(section, articles) for section, articles in feeds_dict.items()]
diff --git a/recipes/history_today.recipe b/recipes/history_today.recipe
index eed6efad0a..b7f1462567 100644
--- a/recipes/history_today.recipe
+++ b/recipes/history_today.recipe
@@ -59,23 +59,23 @@ class HistoryToday(BasicNewsRecipe):
feeds = OrderedDict()
section_title = ''
- for section in div.findAll('div', attrs={'id': re.compile(r"block\-views\-contents.*")}):
+ for section in div.findAll('div', attrs={'id': re.compile(r'block\-views\-contents.*')}):
section_title = self.tag_to_string(
section.find('h2', attrs={'class': 'title'}))
sectionbody = section.find('div', attrs={'class': 'view-content'})
- for article in sectionbody.findAll('div', attrs={'class': re.compile(r"views\-row.*")}):
+ for article in sectionbody.findAll('div', attrs={'class': re.compile(r'views\-row.*')}):
articles = []
subarticle = []
subarticle = article.findAll('div')
if len(subarticle) < 2:
continue
title = self.tag_to_string(subarticle[0])
- originalurl = "https://www.historytoday.com" + \
+ originalurl = 'https://www.historytoday.com' + \
subarticle[0].span.a['href'].strip()
originalpage = self.index_to_soup(originalurl)
printurl = originalpage.find(
'div', attrs={'id': 'ht-tools'}).a['href'].strip()
- url = "https://www.historytoday.com" + printurl
+ url = 'https://www.historytoday.com' + printurl
desc = self.tag_to_string(subarticle[1])
articles.append({'title': title, 'url': url,
'description': desc, 'date': ''})
diff --git a/recipes/hoy.recipe b/recipes/hoy.recipe
index 8811b0e079..dd40c12488 100644
--- a/recipes/hoy.recipe
+++ b/recipes/hoy.recipe
@@ -69,7 +69,7 @@ class Hoy(BasicNewsRecipe):
def preprocess_html(self, soup):
soup.html['dir'] = self.direction
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=utf-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=utf-8')])
soup.head.insert(0, mcharset)
for item in soup.findAll(style=True):
del item['style']
diff --git a/recipes/hurriyet.recipe b/recipes/hurriyet.recipe
index 26577b4faa..697fc037dd 100644
--- a/recipes/hurriyet.recipe
+++ b/recipes/hurriyet.recipe
@@ -35,8 +35,8 @@ class Hurriyet(BasicNewsRecipe):
compress_news_images = True
# some mild formatting
- extra_css = """.news-media { clear: left; }
- .news-detail-title { clear:left; }"""
+ extra_css = '''.news-media { clear: left; }
+ .news-detail-title { clear:left; }'''
keep_only_tags = [
# title
diff --git a/recipes/idnes.recipe b/recipes/idnes.recipe
index af359f2ec2..cca9499fb1 100644
--- a/recipes/idnes.recipe
+++ b/recipes/idnes.recipe
@@ -39,7 +39,7 @@ class iHeuteRecipe(BasicNewsRecipe):
def print_version(self, url):
print_url = url
- split_url = url.split("?")
+ split_url = url.split('?')
if (split_url[0].rfind('dilbert.asp') != -1): # dilbert komix
print_url = print_url.replace('.htm', '.gif&tisk=1')
print_url = print_url.replace('.asp', '.aspx')
diff --git a/recipes/ieee_spectrum_mag.recipe b/recipes/ieee_spectrum_mag.recipe
index 11c65b91f2..9575beb77e 100644
--- a/recipes/ieee_spectrum_mag.recipe
+++ b/recipes/ieee_spectrum_mag.recipe
@@ -7,18 +7,18 @@ from calibre.web.feeds.news import BasicNewsRecipe
class IEEESpectrumMagazine(BasicNewsRecipe):
- title = "IEEE Spectrum Magazine"
+ title = 'IEEE Spectrum Magazine'
language = 'en'
__author__ = 'yodha8'
- description = "Published on day 1 of every month."
+ description = 'Published on day 1 of every month.'
oldest_article = 120 # Mag gathers articles published older than a month online. So we scan for 4 months in the feed.
max_articles_per_feed = 100
auto_cleanup = True
# RSS feed for the current month
now = datetime.datetime.now()
- year_month = now.strftime("%Y/%B").lower()
- month_feed_url = "https://spectrum.ieee.org/feeds/magazine/{}.rss".format(
+ year_month = now.strftime('%Y/%B').lower()
+ month_feed_url = 'https://spectrum.ieee.org/feeds/magazine/{}.rss'.format(
year_month
)
@@ -28,8 +28,8 @@ class IEEESpectrumMagazine(BasicNewsRecipe):
def get_cover_url(self):
"""Go to this month's URL and pull cover image from there."""
- month_url = "https://spectrum.ieee.org/magazine/{}".format(self.year_month)
+ month_url = 'https://spectrum.ieee.org/magazine/{}'.format(self.year_month)
soup = self.index_to_soup(month_url)
- img_meta = soup.find("meta", property="og:image")
- img_url = img_meta["content"]
+ img_meta = soup.find('meta', property='og:image')
+ img_url = img_meta['content']
return img_url
diff --git a/recipes/il_messaggero.recipe b/recipes/il_messaggero.recipe
index ad6ac66af7..0743007cdf 100644
--- a/recipes/il_messaggero.recipe
+++ b/recipes/il_messaggero.recipe
@@ -43,15 +43,15 @@ class IlMessaggero(BasicNewsRecipe):
cover = None
st = time.localtime()
year = str(st.tm_year)
- month = "%.2d" % st.tm_mon
- day = "%.2d" % st.tm_mday
+ month = '%.2d' % st.tm_mon
+ day = '%.2d' % st.tm_mday
cover = 'http://carta.ilmessaggero.it/' + year + \
month + day + '/jpeg/MSGR_20_CITTA_1.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
br.open(cover)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = 'http://www.ilmessaggero.it/img_tst/logomsgr.gif'
return cover
diff --git a/recipes/il_post.recipe b/recipes/il_post.recipe
index 42a08d5964..cd45d211b4 100644
--- a/recipes/il_post.recipe
+++ b/recipes/il_post.recipe
@@ -21,20 +21,20 @@ dates = [ date.today().strftime('%Y/%m/%d'), (date.today() - timedelta(1)).strft
# Comment (add # in front) to disable the sections you are not interested in
# Commenta (aggiungi # davanti alla riga) per disabilitare le sezioni che non vuoi scaricare
sections = [
- ("Italia", "https://www.ilpost.it/italia/"),
- ("Mondo", "https://www.ilpost.it/mondo/"),
- ("Politica", "https://www.ilpost.it/politica/"),
- ("Tecnologia", "https://www.ilpost.it/tecnologia/"),
- ("Internet", "https://www.ilpost.it/internet/"),
- ("Scienza", "https://www.ilpost.it/scienza/"),
- ("Cultura", "https://www.ilpost.it/cultura/"),
- ("Economia", "https://www.ilpost.it/economia/"),
- ("Sport", "https://www.ilpost.it/sport/"),
- ("Media", "https://www.ilpost.it/media/"),
- ("Moda", "https://www.ilpost.it/moda/"),
- ("Libri", "https://www.ilpost.it/libri/"),
- ("Auto", "https://www.ilpost.it/auto/"),
- ("Konrad", "https://www.ilpost.it/europa/"),
+ ('Italia', 'https://www.ilpost.it/italia/'),
+ ('Mondo', 'https://www.ilpost.it/mondo/'),
+ ('Politica', 'https://www.ilpost.it/politica/'),
+ ('Tecnologia', 'https://www.ilpost.it/tecnologia/'),
+ ('Internet', 'https://www.ilpost.it/internet/'),
+ ('Scienza', 'https://www.ilpost.it/scienza/'),
+ ('Cultura', 'https://www.ilpost.it/cultura/'),
+ ('Economia', 'https://www.ilpost.it/economia/'),
+ ('Sport', 'https://www.ilpost.it/sport/'),
+ ('Media', 'https://www.ilpost.it/media/'),
+ ('Moda', 'https://www.ilpost.it/moda/'),
+ ('Libri', 'https://www.ilpost.it/libri/'),
+ ('Auto', 'https://www.ilpost.it/auto/'),
+ ('Konrad', 'https://www.ilpost.it/europa/'),
]
# ----------- CUSTOMIZATION OPTIONS OVER -----------
@@ -45,16 +45,16 @@ class IlPost(BasicNewsRecipe):
__license__ = 'GPL v3'
__copyright__ = '2019, Marco Scirea '
- title = "Il Post"
- language = "it"
+ title = 'Il Post'
+ language = 'it'
description = (
'Puoi decidere quali sezioni scaricare modificando la ricetta.'
' Di default le immagini sono convertite in scala di grigio per risparmiare spazio,'
- ' la ricetta puo\' essere configurata per tenerle a colori'
+ " la ricetta puo' essere configurata per tenerle a colori"
)
- tags = "news"
+ tags = 'news'
masthead_url = 'https://www.ilpost.it/error/images/ilpost.svg'
- ignore_duplicate_articles = {"title", "url"}
+ ignore_duplicate_articles = {'title', 'url'}
no_stylesheets = True
extra_css = ' .wp-caption-text { font-size:small; } '
keep_only_tags = [dict(name='main', attrs={'id':lambda x: x and x.startswith('index_main-content__')})]
@@ -81,9 +81,9 @@ class IlPost(BasicNewsRecipe):
continue
self.log('\t', title)
entries.append({
- "url": link["href"],
- "title": title,
- "description": desc
+ 'url': link['href'],
+ 'title': title,
+ 'description': desc
})
return (name, entries)
diff --git a/recipes/ilsole24ore.recipe b/recipes/ilsole24ore.recipe
index e8b678155b..2bfaaac747 100644
--- a/recipes/ilsole24ore.recipe
+++ b/recipes/ilsole24ore.recipe
@@ -44,13 +44,13 @@ class IlSole24Ore(BasicNewsRecipe):
link = article.get('link', None)
if link is None:
return article
- if link.split('/')[-1] == "story01.htm":
+ if link.split('/')[-1] == 'story01.htm':
link = link.split('/')[-2]
a = ['0B', '0C', '0D', '0E', '0F', '0G', '0N', '0L0S', '0A']
b = ['.', '/', '?', '-', '=', '&', '.com', 'www.', '0']
for i in range(0, len(a)):
link = link.replace(a[i], b[i])
- link = "http://" + link
+ link = 'http://' + link
return link
feeds = [
diff --git a/recipes/inc.recipe b/recipes/inc.recipe
index f1dfa19c2a..501bff65e8 100644
--- a/recipes/inc.recipe
+++ b/recipes/inc.recipe
@@ -40,7 +40,7 @@ class IncMagazineRecipe(BasicNewsRecipe):
def get_browser(self):
def has_login_name(form):
try:
- form.find_control(name="email")
+ form.find_control(name='email')
except:
return False
else:
diff --git a/recipes/independent_australia.recipe b/recipes/independent_australia.recipe
index 1e2f5148da..d1fadf3a4a 100644
--- a/recipes/independent_australia.recipe
+++ b/recipes/independent_australia.recipe
@@ -46,7 +46,7 @@ class IndependentAustralia(BasicNewsRecipe):
remove_javascript = True
keep_only_tags = [
- dict(name='div', attrs={'class': "art-display"})
+ dict(name='div', attrs={'class': 'art-display'})
] # the article content is contained in
# ************************************
diff --git a/recipes/india_today.recipe b/recipes/india_today.recipe
index 3f2c960a3b..548edb323e 100644
--- a/recipes/india_today.recipe
+++ b/recipes/india_today.recipe
@@ -75,7 +75,7 @@ class IndiaToday(BasicNewsRecipe):
section = x[0]
try:
return (
- 'Editor\'s Note', 'Cover Story', 'The Big Story', 'Upfront',
+ "Editor's Note", 'Cover Story', 'The Big Story', 'Upfront',
'NATION', 'INTERVIEW'
).index(section)
except Exception:
diff --git a/recipes/indian_express.recipe b/recipes/indian_express.recipe
index cd94f157cc..6a185992d4 100644
--- a/recipes/indian_express.recipe
+++ b/recipes/indian_express.recipe
@@ -136,13 +136,13 @@ class IndianExpress(BasicNewsRecipe):
return citem['content'].replace('300', '600')
def preprocess_html(self, soup):
- if h2 := (soup.find(attrs={"itemprop": "description"}) or soup.find(**classes("synopsis"))):
+ if h2 := (soup.find(attrs={'itemprop': 'description'}) or soup.find(**classes('synopsis'))):
h2.name = 'p'
h2['id'] = 'sub-d'
for span in soup.findAll(
- "span", attrs={"class": ["ie-custom-caption", "custom-caption"]}
+ 'span', attrs={'class': ['ie-custom-caption', 'custom-caption']}
):
- span["id"] = "img-cap"
+ span['id'] = 'img-cap'
for img in soup.findAll('img', attrs={'data-src': True}):
img['src'] = img['data-src']
if span := soup.find('span', content=True, attrs={'itemprop': 'dateModified'}):
diff --git a/recipes/ing_dk.recipe b/recipes/ing_dk.recipe
index e75e24d5a8..ff47f9da78 100644
--- a/recipes/ing_dk.recipe
+++ b/recipes/ing_dk.recipe
@@ -21,9 +21,9 @@ class Ing_dk(BasicNewsRecipe):
auto_cleanup = True
keep_only_tags = [
- dict(name="div", attrs={'class': 'menu-article-current-title'}),
- dict(name="section", attrs={'class': 'byline'}),
- dict(name="section", attrs={'class': 'body'}),
+ dict(name='div', attrs={'class': 'menu-article-current-title'}),
+ dict(name='section', attrs={'class': 'byline'}),
+ dict(name='section', attrs={'class': 'body'}),
]
feeds = [
diff --git a/recipes/instapaper.recipe b/recipes/instapaper.recipe
index 7422b2bb4a..c7ecf69381 100644
--- a/recipes/instapaper.recipe
+++ b/recipes/instapaper.recipe
@@ -10,7 +10,7 @@
from calibre.web.feeds.news import BasicNewsRecipe
# The Gutenweb stylesheet from https://www.mobileread.com/forums/showpost.php?p=2809828&postcount=31
-gutenweb = """"html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{margin:0;padding:0;border:0;font-size:100%;font:inherit;vertical-align:baseline}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block}body{line-height:1}ol,ul{list-style:none}blockquote,q{quotes:none}blockquote:before,blockquote:after,q:before,q:after{content:\'\';content:none}table{border-collapse:collapse;border-spacing:0}html,:root{font-size:16px}body{font-size:1em;line-height:1.5em;margin-top:1.5em;margin-bottom:1.5em;max-width:33em;margin-left:auto;margin-right:auto;font-family:Helvetica,Arial,sans-serif;text-align:left;word-spacing:normal;hyphens:auto;orphans:2;widows:2;font-variant-numeric:oldstyle-nums}body *{max-width:100%}address,article,aside,audio,canvas,footer,header,ol,ul,dl,pre,section,table,video,img,figure{margin-top:1.5em;margin-bottom:1.5em}p{margin-top:1.5em;margin-bottom:0em}p+p{margin-top:0em;margin-bottom:0em;text-indent:1.5em}h1{font-size:2.25em;line-height:1.33333em;margin-top:0.66667em;margin-bottom:0.66667em}h2{font-size:1.5em;line-height:1em;margin-top:1em;margin-bottom:1em}h3{font-size:1.3125em;line-height:1.14286em;margin-top:1.14286em;margin-bottom:1.14286em}h4{font-size:1.125em;line-height:1.33333em;margin-top:1.33333em;margin-bottom:1.33333em}h1,h2,h3,h4,h5,h6{font-family:Georgia,serif;font-weight:bold;page-break-after:avoid}ul li{list-style-type:disc}ol li{list-style-type:decimal}li{list-style-position:inside;text-indent:1.5em}dt{font-weight:bold;float:left;margin-right:1.5em}tr{page-break-before:avoid;page-break-after:avoid}td,th{outline:0.1em solid #000;padding:0 0.5em;text-align:left}tfoot td{font-style:italic}caption{font-style:italic;text-align:center;font-style:italic}blockquote{margin-top:2.25em;margin-bottom:2.25em;margin-left:2.25em;margin-right:2.25em}blockquote p{margin-top:0em;margin-bottom:0em;text-indent:0}figure{text-align:center}figure img,figure audio,figure canvas,figure video,figure table{margin-top:0;margin-bottom:0}figcaption{font-size:0.875em;line-height:1.71429em;margin-top:0em;margin-bottom:1.71429em;font-style:italic}img{vertical-align:bottom}code,samp,kbd,var{font-family:Consolas,"Liberation Mono",Courier,monospace;font-size:0.875em;font-weight:normal;font-style:normal;text-decoration:none;line-height:0.875em;padding:0 0.3em}mark{background:#ff0;color:#000}code,.code,samp,kbd,var{background-color:#f8f8f8;box-shadow:0 0 0.1em 0.1em #ddd}em{font-style:italic}strong{font-weight:bold}abbr{letter-spacing:0.1em}abbr[title]{border-bottom:1px dotted #000}cite,q{font-style:italic}q{font-style:italic;quotes:"\xe2\x80\x9c" "\xe2\x80\x9d" "\xe2\x80\x98" "\xe2\x80\x99"}q:before{content:open-quote}q:after{content:close-quote}dfn{font-style:italic}sup,sub{font-size:70%;line-height:70%;position:relative}sup{top:-0.5em}sub{top:0.5em}hr{border-bottom:0.0625em solid #000;border-top:0 none;border-left:0 none;border-right:0 
none;margin-top:1.4375em;margin-bottom:1.5em}small{font-size:0.875em;line-height:1.71429em;margin-top:1.71429em;margin-bottom:1.71429em}i{font-style:italic}b{font-weight:bold}u{text-decoration:underline}s{text-decoration:line-through}ins{font-weight:bold;text-decoration:underline}del{text-decoration:line-through}.caps,.nums{letter-spacing:0.1em}.caps{font-variant-numeric:lining-nums}.code{overflow:auto;padding:0 1em;background-color:#f8f8f8;box-shadow:0 0 0.1em 0.1em #ddd}.code code,.code samp,.code kbd,.code var{box-shadow:none;padding:0}.chapter{page-break-after:auto;page-break-before:always}.note{text-indent:0;font-size:0.875em;line-height:1.71429em;margin-top:1.71429em;margin-bottom:1.71429em}.verse{font-family:inherit;display:table;width:auto;margin-left:auto;margin-right:auto}.toc{margin:0 auto}.toc td,.toc th{outline:0 none}.toc th{padding:0 0.5em 0 0;text-align:right;font-weight:normal}.toc td:before{content:"\\2022";padding-right:0.5em}.toc td{padding:0;text-align:left;font-style:italic}@page{margin-top:72pt;margin-bottom:72pt}@media print{body{font-size:12pt;line-height:18pt;margin-top:0pt;margin-bottom:0pt;font-family:"Times New Roman",Times,serif}p{margin-top:18pt;margin-bottom:0pt}p+p{text-indent:18pt}address,article,aside,audio,canvas,footer,header,ol,ul,dl,pre,section,table,video,img,figure{margin-top:18pt;margin-bottom:18pt}h1{font-size:21pt;line-height:36pt;margin-top:18pt;margin-bottom:18pt}h2{font-size:18pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}h3{font-size:16pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}h4{font-size:14pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}dt{margin-right:18pt}li{text-indent:18pt}blockquote{margin-top:27pt;margin-bottom:27pt;margin-left:27pt;margin-right:27pt}blockquote p{margin-top:0em;margin-bottom:0em;text-indent:0}figcaption{font-size:10pt;line-height:18pt;margin-top:0pt;margin-bottom:18pt}pre{white-space:pre-line}abbr[title]{border-bottom:0 none}small{font-size:10pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}hr{border-bottom:0.08333em solid #000;margin-top:17pt;margin-bottom:18pt}.note{font-size:10pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}}""" # noqa: E501
+gutenweb = '''"html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{margin:0;padding:0;border:0;font-size:100%;font:inherit;vertical-align:baseline}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block}body{line-height:1}ol,ul{list-style:none}blockquote,q{quotes:none}blockquote:before,blockquote:after,q:before,q:after{content:\'\';content:none}table{border-collapse:collapse;border-spacing:0}html,:root{font-size:16px}body{font-size:1em;line-height:1.5em;margin-top:1.5em;margin-bottom:1.5em;max-width:33em;margin-left:auto;margin-right:auto;font-family:Helvetica,Arial,sans-serif;text-align:left;word-spacing:normal;hyphens:auto;orphans:2;widows:2;font-variant-numeric:oldstyle-nums}body *{max-width:100%}address,article,aside,audio,canvas,footer,header,ol,ul,dl,pre,section,table,video,img,figure{margin-top:1.5em;margin-bottom:1.5em}p{margin-top:1.5em;margin-bottom:0em}p+p{margin-top:0em;margin-bottom:0em;text-indent:1.5em}h1{font-size:2.25em;line-height:1.33333em;margin-top:0.66667em;margin-bottom:0.66667em}h2{font-size:1.5em;line-height:1em;margin-top:1em;margin-bottom:1em}h3{font-size:1.3125em;line-height:1.14286em;margin-top:1.14286em;margin-bottom:1.14286em}h4{font-size:1.125em;line-height:1.33333em;margin-top:1.33333em;margin-bottom:1.33333em}h1,h2,h3,h4,h5,h6{font-family:Georgia,serif;font-weight:bold;page-break-after:avoid}ul li{list-style-type:disc}ol li{list-style-type:decimal}li{list-style-position:inside;text-indent:1.5em}dt{font-weight:bold;float:left;margin-right:1.5em}tr{page-break-before:avoid;page-break-after:avoid}td,th{outline:0.1em solid #000;padding:0 0.5em;text-align:left}tfoot td{font-style:italic}caption{font-style:italic;text-align:center;font-style:italic}blockquote{margin-top:2.25em;margin-bottom:2.25em;margin-left:2.25em;margin-right:2.25em}blockquote p{margin-top:0em;margin-bottom:0em;text-indent:0}figure{text-align:center}figure img,figure audio,figure canvas,figure video,figure table{margin-top:0;margin-bottom:0}figcaption{font-size:0.875em;line-height:1.71429em;margin-top:0em;margin-bottom:1.71429em;font-style:italic}img{vertical-align:bottom}code,samp,kbd,var{font-family:Consolas,"Liberation Mono",Courier,monospace;font-size:0.875em;font-weight:normal;font-style:normal;text-decoration:none;line-height:0.875em;padding:0 0.3em}mark{background:#ff0;color:#000}code,.code,samp,kbd,var{background-color:#f8f8f8;box-shadow:0 0 0.1em 0.1em #ddd}em{font-style:italic}strong{font-weight:bold}abbr{letter-spacing:0.1em}abbr[title]{border-bottom:1px dotted #000}cite,q{font-style:italic}q{font-style:italic;quotes:"\xe2\x80\x9c" "\xe2\x80\x9d" "\xe2\x80\x98" "\xe2\x80\x99"}q:before{content:open-quote}q:after{content:close-quote}dfn{font-style:italic}sup,sub{font-size:70%;line-height:70%;position:relative}sup{top:-0.5em}sub{top:0.5em}hr{border-bottom:0.0625em solid #000;border-top:0 none;border-left:0 none;border-right:0 
none;margin-top:1.4375em;margin-bottom:1.5em}small{font-size:0.875em;line-height:1.71429em;margin-top:1.71429em;margin-bottom:1.71429em}i{font-style:italic}b{font-weight:bold}u{text-decoration:underline}s{text-decoration:line-through}ins{font-weight:bold;text-decoration:underline}del{text-decoration:line-through}.caps,.nums{letter-spacing:0.1em}.caps{font-variant-numeric:lining-nums}.code{overflow:auto;padding:0 1em;background-color:#f8f8f8;box-shadow:0 0 0.1em 0.1em #ddd}.code code,.code samp,.code kbd,.code var{box-shadow:none;padding:0}.chapter{page-break-after:auto;page-break-before:always}.note{text-indent:0;font-size:0.875em;line-height:1.71429em;margin-top:1.71429em;margin-bottom:1.71429em}.verse{font-family:inherit;display:table;width:auto;margin-left:auto;margin-right:auto}.toc{margin:0 auto}.toc td,.toc th{outline:0 none}.toc th{padding:0 0.5em 0 0;text-align:right;font-weight:normal}.toc td:before{content:"\\2022";padding-right:0.5em}.toc td{padding:0;text-align:left;font-style:italic}@page{margin-top:72pt;margin-bottom:72pt}@media print{body{font-size:12pt;line-height:18pt;margin-top:0pt;margin-bottom:0pt;font-family:"Times New Roman",Times,serif}p{margin-top:18pt;margin-bottom:0pt}p+p{text-indent:18pt}address,article,aside,audio,canvas,footer,header,ol,ul,dl,pre,section,table,video,img,figure{margin-top:18pt;margin-bottom:18pt}h1{font-size:21pt;line-height:36pt;margin-top:18pt;margin-bottom:18pt}h2{font-size:18pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}h3{font-size:16pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}h4{font-size:14pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}dt{margin-right:18pt}li{text-indent:18pt}blockquote{margin-top:27pt;margin-bottom:27pt;margin-left:27pt;margin-right:27pt}blockquote p{margin-top:0em;margin-bottom:0em;text-indent:0}figcaption{font-size:10pt;line-height:18pt;margin-top:0pt;margin-bottom:18pt}pre{white-space:pre-line}abbr[title]{border-bottom:0 none}small{font-size:10pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}hr{border-bottom:0.08333em solid #000;margin-top:17pt;margin-bottom:18pt}.note{font-size:10pt;line-height:18pt;margin-top:18pt;margin-bottom:18pt}}''' # noqa: E501
class InstapaperRecipe(BasicNewsRecipe):
diff --git a/recipes/internazionale.recipe b/recipes/internazionale.recipe
index d2c1a0bbfc..19540caf93 100644
--- a/recipes/internazionale.recipe
+++ b/recipes/internazionale.recipe
@@ -29,13 +29,13 @@ class Volkskrant(BasicNewsRecipe):
),
dict(name=['script', 'style']),
]
- remove_attributes = ["class", "id", "name", "style"]
+ remove_attributes = ['class', 'id', 'name', 'style']
encoding = 'utf-8'
no_stylesheets = True
ignore_duplicate_articles = {'url'}
- current_number_url = "https://www.internazionale.it/sommario"
- home_url = "https://www.internazionale.it"
+ current_number_url = 'https://www.internazionale.it/sommario'
+ home_url = 'https://www.internazionale.it'
cover_url = None
def extract_article(self, article):
diff --git a/recipes/iol_za.recipe b/recipes/iol_za.recipe
index 48072617eb..8ddc433fb9 100644
--- a/recipes/iol_za.recipe
+++ b/recipes/iol_za.recipe
@@ -23,10 +23,10 @@ class IOL_za(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newsportal'
masthead_url = 'http://www.iol.co.za/polopoly_fs/iol-news5-1.989381!/image/464471284.png_gen/derivatives/absolute/464471284.png'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif }
img{display: block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/iprofesional.recipe b/recipes/iprofesional.recipe
index 27dad0a157..c2bbb98246 100644
--- a/recipes/iprofesional.recipe
+++ b/recipes/iprofesional.recipe
@@ -25,7 +25,7 @@ class iProfesional(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newsportal'
masthead_url = 'http://www.iprofesional.com/img/header/logoiprofesional.png'
- extra_css = """
+ extra_css = '''
body{font-family: "Open Sans", sans-serif}
img{margin-bottom: 0.4em; display:block}
.tituloprincipal{font-family: WhitneyBold, Arial, sans-serif;
@@ -33,7 +33,7 @@ class iProfesional(BasicNewsRecipe):
font-size: x-large;
display: block; margin-bottom: 1em;}
.bajadanh{font-size: small; display: block; margin-bottom: 1em;}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/jacobinmag.recipe b/recipes/jacobinmag.recipe
index 5804d55fec..2338d62c7c 100644
--- a/recipes/jacobinmag.recipe
+++ b/recipes/jacobinmag.recipe
@@ -35,11 +35,11 @@ class Jacobinmag(BasicNewsRecipe):
issue_url = None
PREFIX = 'https://www.jacobinmag.com'
LOGIN = 'https://auth.jacobinmag.com/mini_profile?redirect=https%3A%2F%2Fwww.jacobinmag.com%2F'
- extra_css = """
+ extra_css = '''
body{font-family: Antwerp, 'Times New Roman', Times, serif}
img{margin-top:1em; margin-bottom: 1em; display:block}
.entry-dek,.entry-author{font-family: Hurme-No3, Futura, sans-serif}
- """
+ '''
conversion_options = {
'comment': description,
diff --git a/recipes/japan_times.recipe b/recipes/japan_times.recipe
index eb1db6493c..fad4f6eb2b 100644
--- a/recipes/japan_times.recipe
+++ b/recipes/japan_times.recipe
@@ -1,36 +1,36 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-__license__ = "GPL v3"
+__license__ = 'GPL v3'
__copyright__ = (
- "2008-2013, Darko Miletic . "
- "2022, Albert Aparicio Isarn "
+ '2008-2013, Darko Miletic . '
+ '2022, Albert Aparicio Isarn '
)
-"""
+'''
japantimes.co.jp
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe
class JapanTimes(BasicNewsRecipe):
- title = "The Japan Times"
- __author__ = "Albert Aparicio Isarn (original recipe by Darko Miletic)"
+ title = 'The Japan Times'
+ __author__ = 'Albert Aparicio Isarn (original recipe by Darko Miletic)'
description = (
"The latest news from Japan Times, Japan's leading English-language daily newspaper"
)
- language = "en_JP"
- category = "news, politics, japan"
- publisher = "The Japan Times"
+ language = 'en_JP'
+ category = 'news, politics, japan'
+ publisher = 'The Japan Times'
oldest_article = 2
max_articles_per_feed = 150
no_stylesheets = True
remove_javascript = True
use_embedded_content = False
- encoding = "utf8"
- publication_type = "newspaper"
- masthead_url = "https://cdn-japantimes.com/wp-content/themes/jt_theme/library/img/japantimes-logo-tagline.png"
- extra_css = "body{font-family: Geneva,Arial,Helvetica,sans-serif}"
+ encoding = 'utf8'
+ publication_type = 'newspaper'
+ masthead_url = 'https://cdn-japantimes.com/wp-content/themes/jt_theme/library/img/japantimes-logo-tagline.png'
+ extra_css = 'body{font-family: Geneva,Arial,Helvetica,sans-serif}'
recipe_specific_options = {
'days': {
@@ -47,37 +47,37 @@ class JapanTimes(BasicNewsRecipe):
self.oldest_article = float(d)
conversion_options = {
- "comment": description,
- "tags": category,
- "publisher": publisher,
- "language": language,
+ 'comment': description,
+ 'tags': category,
+ 'publisher': publisher,
+ 'language': language,
}
- remove_tags_before = {"name": "h1"}
- remove_tags_after = {"name": "ul", "attrs": {"class": "single-sns-area"}}
+ remove_tags_before = {'name': 'h1'}
+ remove_tags_after = {'name': 'ul', 'attrs': {'class': 'single-sns-area'}}
keep_only_tags = [
- {"name": "div", "attrs": {"class": "padding_block"}},
+ {'name': 'div', 'attrs': {'class': 'padding_block'}},
# {"name": "h5", "attrs": {"class": "writer", "role": "author"}},
# {"name": "p", "attrs": {"class": "credit"}},
]
remove_tags = [
- {"name": "div", "id": "no_js_blocker", "attrs": {"class": "padding_block"}},
- {"name": "div", "attrs": {"class": "single-upper-meta"}},
- {"name": "ul", "attrs": {"class": "single-sns-area"}},
+ {'name': 'div', 'id': 'no_js_blocker', 'attrs': {'class': 'padding_block'}},
+ {'name': 'div', 'attrs': {'class': 'single-upper-meta'}},
+ {'name': 'ul', 'attrs': {'class': 'single-sns-area'}},
]
feeds = [
- (u"Top Stories", u"https://www.japantimes.co.jp/feed/topstories/"),
- (u"News", u"https://www.japantimes.co.jp/news/feed/"),
- (u"Opinion", u"https://www.japantimes.co.jp/opinion/feed/"),
- (u"Life", u"https://www.japantimes.co.jp/life/feed/"),
- (u"Community", u"https://www.japantimes.co.jp/community/feed/"),
- (u"Culture", u"https://www.japantimes.co.jp/culture/feed/"),
- (u"Sports", u"https://www.japantimes.co.jp/sports/feed/"),
+ (u'Top Stories', u'https://www.japantimes.co.jp/feed/topstories/'),
+ (u'News', u'https://www.japantimes.co.jp/news/feed/'),
+ (u'Opinion', u'https://www.japantimes.co.jp/opinion/feed/'),
+ (u'Life', u'https://www.japantimes.co.jp/life/feed/'),
+ (u'Community', u'https://www.japantimes.co.jp/community/feed/'),
+ (u'Culture', u'https://www.japantimes.co.jp/culture/feed/'),
+ (u'Sports', u'https://www.japantimes.co.jp/sports/feed/'),
]
def get_article_url(self, article):
rurl = BasicNewsRecipe.get_article_url(self, article)
- return rurl.partition("?")[0]
+ return rurl.partition('?')[0]
def preprocess_raw_html(self, raw, url):
- return "" + raw[raw.find("") :]
+ return '' + raw[raw.find('') :]
diff --git a/recipes/javalobby.recipe b/recipes/javalobby.recipe
index dfd5396dd9..9d884cd5a6 100644
--- a/recipes/javalobby.recipe
+++ b/recipes/javalobby.recipe
@@ -19,10 +19,10 @@ class Engadget(BasicNewsRecipe):
no_stylesheets = True
use_embedded_content = False
- remove_tags = [dict(name='div', attrs={'class': ["fivestar-static-form-item", "relatedContent", "pagination clearfix", "addResources"]}),
- dict(name='div', attrs={'id': ["comments"]})]
+ remove_tags = [dict(name='div', attrs={'class': ['fivestar-static-form-item', 'relatedContent', 'pagination clearfix', 'addResources']}),
+ dict(name='div', attrs={'id': ['comments']})]
- keep_only_tags = [dict(name='div', attrs={'id': ["article"]})]
+ keep_only_tags = [dict(name='div', attrs={'id': ['article']})]
feeds = [(u'news', u'http://feeds.dzone.com/javalobby/frontpage')]
diff --git a/recipes/jijinews.recipe b/recipes/jijinews.recipe
index bd2a471e42..02728f2f6d 100644
--- a/recipes/jijinews.recipe
+++ b/recipes/jijinews.recipe
@@ -23,8 +23,8 @@ class JijiDotCom(BasicNewsRecipe):
feeds = [(u'\u30cb\u30e5\u30fc\u30b9',
u'http://www.jiji.com/rss/ranking.rdf')]
- remove_tags_before = dict(id="article-area")
- remove_tags_after = dict(id="ad_google")
+ remove_tags_before = dict(id='article-area')
+ remove_tags_after = dict(id='ad_google')
def get_cover_url(self):
cover_url = 'http://www.jiji.com/img/top_header_logo2.gif'
diff --git a/recipes/kirkusreviews.recipe b/recipes/kirkusreviews.recipe
index 99de429ee3..c3437f2fd7 100644
--- a/recipes/kirkusreviews.recipe
+++ b/recipes/kirkusreviews.recipe
@@ -4,20 +4,20 @@ from calibre.web.feeds.news import BasicNewsRecipe
class KirkusReviews(BasicNewsRecipe):
- title = "Kirkus Reviews"
- description = ("Kirkus Reviews is an American book review magazine founded in 1933 by Virginia Kirkus."
- " The magazine is headquartered in New York City. Released twice monthly on the 1st/15th.")
- language = "en"
- __author__ = "ping"
- publication_type = "magazine"
+ title = 'Kirkus Reviews'
+ description = ('Kirkus Reviews is an American book review magazine founded in 1933 by Virginia Kirkus.'
+ ' The magazine is headquartered in New York City. Released twice monthly on the 1st/15th.')
+ language = 'en'
+ __author__ = 'ping'
+ publication_type = 'magazine'
masthead_url = (
- "https://d1fd687oe6a92y.cloudfront.net/img/kir_images/logo/kirkus-nav-logo.svg"
+ 'https://d1fd687oe6a92y.cloudfront.net/img/kir_images/logo/kirkus-nav-logo.svg'
)
- encoding = "utf-8"
+ encoding = 'utf-8'
remove_javascript = True
no_stylesheets = True
auto_cleanup = False
- ignore_duplicate_articles = {"url"}
+ ignore_duplicate_articles = {'url'}
compress_news_images = True
compress_news_images_auto_size = 6
max_articles_per_feed = 99
@@ -25,105 +25,105 @@ class KirkusReviews(BasicNewsRecipe):
keep_only_tags = [
dict(
class_=[
- "article-author",
- "article-author-img-start",
- "article-author-description-start",
- "single-review",
+ 'article-author',
+ 'article-author-img-start',
+ 'article-author-description-start',
+ 'single-review',
]
)
]
remove_tags = [
dict(
class_=[
- "sidebar-content",
- "article-social-share-desktop-first",
- "article-social-share-desktop-pagination",
- "article-social-share-mobile",
- "share-review-text",
- "like-dislike-article",
- "rate-this-book-text",
- "input-group",
- "user-comments",
- "show-all-response-text",
- "button-row",
- "hide-on-mobile",
- "related-article",
- "breadcrumb-row",
- "shop-now-dropdown",
+ 'sidebar-content',
+ 'article-social-share-desktop-first',
+ 'article-social-share-desktop-pagination',
+ 'article-social-share-mobile',
+ 'share-review-text',
+ 'like-dislike-article',
+ 'rate-this-book-text',
+ 'input-group',
+ 'user-comments',
+ 'show-all-response-text',
+ 'button-row',
+ 'hide-on-mobile',
+ 'related-article',
+ 'breadcrumb-row',
+ 'shop-now-dropdown',
]
)
]
- remove_tags_after = [dict(class_="single-review")]
+ remove_tags_after = [dict(class_='single-review')]
- extra_css = """
+ extra_css = '''
.image-container img { max-width: 100%; height: auto; margin-bottom: 0.2rem; }
.photo-caption { font-size: 0.8rem; margin-bottom: 0.5rem; display: block; }
.book-review-img .image-container { text-align: center; }
.book-rating-module .description-title { font-size: 1.25rem; margin-left: 0; text-align: center; }
- """
+ '''
def preprocess_html(self, soup):
- h1 = soup.find(class_="article-title")
- book_cover = soup.find("ul", class_="book-review-img")
+ h1 = soup.find(class_='article-title')
+ book_cover = soup.find('ul', class_='book-review-img')
if book_cover:
- for li in book_cover.find_all("li"):
- li.name = "div"
- book_cover.name = "div"
+ for li in book_cover.find_all('li'):
+ li.name = 'div'
+ book_cover.name = 'div'
if h1:
book_cover.insert_before(h1.extract())
return soup
def parse_index(self):
- issue_url = "https://www.kirkusreviews.com/magazine/current/"
+ issue_url = 'https://www.kirkusreviews.com/magazine/current/'
soup = self.index_to_soup(issue_url)
- issue = soup.find(name="article", class_="issue-container")
- cover_img = issue.select(".issue-header .cover-image img")
+ issue = soup.find(name='article', class_='issue-container')
+ cover_img = issue.select('.issue-header .cover-image img')
if cover_img:
- self.cover_url = cover_img[0]["src"]
+ self.cover_url = cover_img[0]['src']
- h1 = issue.find("h1")
+ h1 = issue.find('h1')
if h1:
- self.timefmt = f" [{self.tag_to_string(h1)}]" # edition
+ self.timefmt = f' [{self.tag_to_string(h1)}]' # edition
articles = {}
- for book_ele in soup.find_all(name="div", class_="issue-featured-book"):
- link = book_ele.find("a")
+ for book_ele in soup.find_all(name='div', class_='issue-featured-book'):
+ link = book_ele.find('a')
if not link:
continue
- section = self.tag_to_string(book_ele.find("h3")).upper()
+ section = self.tag_to_string(book_ele.find('h3')).upper()
articles.setdefault(section, []).append(
- {"url": urljoin(issue_url, link["href"]), "title": link["title"]}
+ {'url': urljoin(issue_url, link['href']), 'title': link['title']}
)
- for post_ele in issue.select("div.issue-more-posts ul li div.lead-text"):
- link = post_ele.find("a")
+ for post_ele in issue.select('div.issue-more-posts ul li div.lead-text'):
+ link = post_ele.find('a')
if not link:
continue
- section = self.tag_to_string(post_ele.find(class_="lead-text-type")).upper()
+ section = self.tag_to_string(post_ele.find(class_='lead-text-type')).upper()
articles.setdefault(section, []).append(
{
- "url": urljoin(issue_url, link["href"]),
- "title": self.tag_to_string(link),
+ 'url': urljoin(issue_url, link['href']),
+ 'title': self.tag_to_string(link),
}
)
- for section_ele in issue.select("section.reviews-section"):
+ for section_ele in issue.select('section.reviews-section'):
section_articles = []
- for review in section_ele.select("ul li.starred"):
- link = review.select("h4 a")
+ for review in section_ele.select('ul li.starred'):
+ link = review.select('h4 a')
if not link:
continue
- description = review.find("p")
+ description = review.find('p')
section_articles.append(
{
- "url": urljoin(issue_url, link[0]["href"]),
- "title": self.tag_to_string(link[0]),
- "description": ""
+ 'url': urljoin(issue_url, link[0]['href']),
+ 'title': self.tag_to_string(link[0]),
+ 'description': ''
if not description
else self.tag_to_string(description),
}
)
if not section_articles:
continue
- section = self.tag_to_string(section_ele.find("h3")).upper()
+ section = self.tag_to_string(section_ele.find('h3')).upper()
if section not in articles:
articles[section] = []
articles.setdefault(section, []).extend(section_articles)
diff --git a/recipes/kopalniawiedzy.recipe b/recipes/kopalniawiedzy.recipe
index c9189ca643..f0e8ce9354 100644
--- a/recipes/kopalniawiedzy.recipe
+++ b/recipes/kopalniawiedzy.recipe
@@ -25,7 +25,7 @@ class KopalniaWiedzy(BasicNewsRecipe):
{'name': 'div', 'attrs': {'class': 'article-time-and-cat'}}, {'name': 'p', 'attrs': {'class': 'tags'}}]
remove_tags_after = dict(attrs={'class': 'ad-square'})
keep_only_tags = [
- dict(name="div", attrs={'class': 'article-text text-small'})]
+ dict(name='div', attrs={'class': 'article-text text-small'})]
extra_css = '.topimage {margin-top: 30px}'
preprocess_regexps = [
diff --git a/recipes/korben.recipe b/recipes/korben.recipe
index 30a8b12cca..aa1b0f3492 100644
--- a/recipes/korben.recipe
+++ b/recipes/korben.recipe
@@ -17,6 +17,6 @@ class BasicUserRecipe1318619728(BasicNewsRecipe):
try:
br.open(masthead)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
masthead = None
return masthead
diff --git a/recipes/kudy_z_nudy.recipe b/recipes/kudy_z_nudy.recipe
index 5a5b320e3a..963d3185a9 100644
--- a/recipes/kudy_z_nudy.recipe
+++ b/recipes/kudy_z_nudy.recipe
@@ -22,8 +22,8 @@ class kudyznudyRecipe(BasicNewsRecipe):
cover_url = 'http://www.kudyznudy.cz/App_Themes/KzN/Images/Containers/Header/HeaderLogoKZN.png'
remove_javascript = True
no_stylesheets = True
- extra_css = """
- """
+ extra_css = '''
+ '''
remove_attributes = []
remove_tags_before = dict(
diff --git a/recipes/la_jornada.recipe b/recipes/la_jornada.recipe
index cbf806c3ef..ea13c1cf7d 100644
--- a/recipes/la_jornada.recipe
+++ b/recipes/la_jornada.recipe
@@ -33,10 +33,10 @@ class LaJornada_mx(BasicNewsRecipe):
use_embedded_content = False
language = 'es_MX'
remove_empty_feeds = True
- cover_url = strftime("http://www.jornada.com.mx/%Y/%m/%d/portada.pdf")
+ cover_url = strftime('http://www.jornada.com.mx/%Y/%m/%d/portada.pdf')
masthead_url = 'http://www.jornada.com.mx/v7.0/imagenes/la-jornada-trans.png'
publication_type = 'newspaper'
- extra_css = """
+ extra_css = '''
body{font-family: "Times New Roman",serif }
.cabeza{font-size: xx-large; font-weight: bold }
.documentFirstHeading{font-size: xx-large; font-weight: bold }
@@ -54,7 +54,7 @@ class LaJornada_mx(BasicNewsRecipe):
.text{margin-top: 1.4em}
p.inicial{display: inline; font-size: xx-large; font-weight: bold}
p.s-s{display: inline; text-indent: 0}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/la_republica.recipe b/recipes/la_republica.recipe
index 4af4575bf9..9efeaeacbd 100644
--- a/recipes/la_republica.recipe
+++ b/recipes/la_republica.recipe
@@ -17,7 +17,7 @@ class LaRepubblica(BasicNewsRecipe):
__author__ = 'Lorenzo Vigentini, Gabriele Marini, Darko Miletic, faber1971'
description = 'il quotidiano online con tutte le notizie in tempo reale. News e ultime notizie. Tutti i settori: politica, cronaca, economia, sport, esteri, scienza, tecnologia, internet, spettacoli, musica, cultura, arte, mostre, libri, dvd, vhs, concerti, cinema, attori, attrici, recensioni, chat, cucina, mappe. Le citta di Repubblica: Roma, Milano, Bologna, Firenze, Palermo, Napoli, Bari, Torino.' # noqa: E501
masthead_url = 'http://www.repubblica.it/static/images/homepage/2010/la-repubblica-logo-home-payoff.png'
- publisher = 'Gruppo editoriale L\'Espresso'
+ publisher = "Gruppo editoriale L'Espresso"
category = 'News, politics, culture, economy, general interest'
language = 'it'
timefmt = '[%a, %d %b, %Y]'
@@ -28,9 +28,9 @@ class LaRepubblica(BasicNewsRecipe):
publication_type = 'newspaper'
articles_are_obfuscated = True
temp_files = []
- extra_css = """
+ extra_css = '''
img{display: block}
- """
+ '''
remove_attributes = ['width', 'height', 'lang', 'xmlns:og', 'xmlns:fb']
@@ -50,7 +50,7 @@ class LaRepubblica(BasicNewsRecipe):
html = response.read()
count = 10
except:
- print("Retrying download...")
+ print('Retrying download...')
count += 1
self.temp_files.append(PersistentTemporaryFile('_fa.html'))
self.temp_files[-1].write(html)
diff --git a/recipes/lalibre_be.recipe b/recipes/lalibre_be.recipe
index cef8318a0e..100c32712a 100644
--- a/recipes/lalibre_be.recipe
+++ b/recipes/lalibre_be.recipe
@@ -32,7 +32,7 @@ class LaLibre(BasicNewsRecipe):
feeds = [
- (u'L\'actu', u'http://www.lalibre.be/rss/?section=10'),
+ (u"L'actu", u'http://www.lalibre.be/rss/?section=10'),
(u'Culture', u'http://www.lalibre.be/rss/?section=5'),
(u'Economie', u'http://www.lalibre.be/rss/?section=3'),
(u'Libre Entreprise', u'http://www.lalibre.be/rss/?section=904'),
diff --git a/recipes/lanacion.recipe b/recipes/lanacion.recipe
index a8290a0337..326b9a575f 100644
--- a/recipes/lanacion.recipe
+++ b/recipes/lanacion.recipe
@@ -9,7 +9,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Lanacion(BasicNewsRecipe):
title = 'La Nacion'
__author__ = 'Darko Miletic'
- description = "lanacion.com - Informacion actualizada las 24 horas, con noticias de Argentina y del mundo"
+ description = 'lanacion.com - Informacion actualizada las 24 horas, con noticias de Argentina y del mundo'
publisher = 'La Nacion S.A.'
category = 'news, politics, Argentina'
oldest_article = 1
@@ -22,13 +22,13 @@ class Lanacion(BasicNewsRecipe):
publication_type = 'newspaper'
remove_empty_feeds = True
masthead_url = 'http://www.lanacion.com.ar/_ui/desktop/imgs/layout/logos/ln-home.gif'
- extra_css = """
+ extra_css = '''
h1{font-family: TheSans,Arial,sans-serif}
body{font-family: Arial,sans-serif}
img{display: block}
.firma,.fecha{font-size: small}
.epigrafe-columna{font-size: x-small}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/lapoliticaonline_ar.recipe b/recipes/lapoliticaonline_ar.recipe
index 7134b6dcf6..aa1e78310d 100644
--- a/recipes/lapoliticaonline_ar.recipe
+++ b/recipes/lapoliticaonline_ar.recipe
@@ -9,7 +9,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class LaPoliticaOnline_AR(BasicNewsRecipe):
title = 'La Politica Online'
__author__ = 'Darko Miletic'
- description = "Informacion actualizada las 24 horas, con noticias de Argentina y del mundo"
+ description = 'Informacion actualizada las 24 horas, con noticias de Argentina y del mundo'
publisher = 'La Politica Online SA'
category = 'news, politics, Argentina'
oldest_article = 1
@@ -22,13 +22,13 @@ class LaPoliticaOnline_AR(BasicNewsRecipe):
publication_type = 'newspaper'
remove_empty_feeds = True
masthead_url = 'http://www.lapoliticaonline.com/0/img/header/logo.gif'
- extra_css = """
+ extra_css = '''
.title,.vsmcontent{font-family: Georgia,"Times New Roman",Times,serif}
body{font-family: Arial,Helvetica,sans-serif}
.galleryfooter{font-size: small; color: gainsboro;}
img{display: block}
.title{font-size: x-large; font-weight: bold; line-height: 2em;}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/laprensa.recipe b/recipes/laprensa.recipe
index 820edf8abe..bf04fa9dfa 100644
--- a/recipes/laprensa.recipe
+++ b/recipes/laprensa.recipe
@@ -35,33 +35,33 @@ class LaPrensa(BasicNewsRecipe):
filter_regexps = [r'.*archive.aspx.*']
remove_tags = [
- dict(name='td', attrs={'class': ["link-registro", "link-buscador"]}),
+ dict(name='td', attrs={'class': ['link-registro', 'link-buscador']}),
dict(name='td', attrs={
- 'id': ["TDTabItem1", "TDTabItem2", "TDTabItem3", "TDTabItem4"]}),
- dict(name='table', attrs={'class': ["marco-botonera"]}),
- dict(name='tr', attrs={'class': ["messages", "IUTabItemSelected"]}),
- dict(name='input', attrs={'id': "txt_allfields"}),
+ 'id': ['TDTabItem1', 'TDTabItem2', 'TDTabItem3', 'TDTabItem4']}),
+ dict(name='table', attrs={'class': ['marco-botonera']}),
+ dict(name='tr', attrs={'class': ['messages', 'IUTabItemSelected']}),
+ dict(name='input', attrs={'id': 'txt_allfields'}),
dict(name='div', attrs={
- 'id': ["TabItem1", "TabItem2", "TabItem3", "TabItem4", "RCPanel"]}),
- dict(name='span', attrs={'id': ["GWCNavigatorControl", "_ctl15"]}),
- dict(name='span', attrs={'class': ["ranking-titulo", "IUTab"]}),
- dict(name='a', attrs={'class': ["link-registro", ]}),
- dict(name='img', src="/versions/1/imgs/icono-comentario.gif"),
- dict(name='img', src="/versions/1/imgs/logo.gif"),
- dict(name='img', src="/versions/1/imgs/boton-ingresar-roll.gif"),
- dict(name='img', src="/versions/1/imgs/icono-recomendar.gif"),
+ 'id': ['TabItem1', 'TabItem2', 'TabItem3', 'TabItem4', 'RCPanel']}),
+ dict(name='span', attrs={'id': ['GWCNavigatorControl', '_ctl15']}),
+ dict(name='span', attrs={'class': ['ranking-titulo', 'IUTab']}),
+ dict(name='a', attrs={'class': ['link-registro', ]}),
+ dict(name='img', src='/versions/1/imgs/icono-comentario.gif'),
+ dict(name='img', src='/versions/1/imgs/logo.gif'),
+ dict(name='img', src='/versions/1/imgs/boton-ingresar-roll.gif'),
+ dict(name='img', src='/versions/1/imgs/icono-recomendar.gif'),
dict(name='button'),
- dict(name='img', src="/versions/1/imgs/boton-votar-roll.gif"),
- dict(name='img', src="/versions/1/imgs/boton-ingresar.gif"),
- dict(name='img', src="/versions/1/imgs/icono-imprimir.gif"),
- dict(name='img', src="/versions/1/imgs/icono-ampliar-letra.gif"),
- dict(name='img', src="/versions/1/imgs/icono-reducir-letra.gif"),
- dict(name='img', src="/versions/1/imgs/pix-trans.gif"),
- dict(name='img', src="/versions/1/imgs/icono-buscador.gif"),
- dict(name='img', src="/versions/1/imgs/separador-linea-azul.gif"),
- dict(name='img', src=" /versions/1/imgs/separador-linea.gif"),
- dict(name='a', text="Powered by Civinext Groupware - V. 2.0.3567.23706"),
- dict(name='img', height="0")
+ dict(name='img', src='/versions/1/imgs/boton-votar-roll.gif'),
+ dict(name='img', src='/versions/1/imgs/boton-ingresar.gif'),
+ dict(name='img', src='/versions/1/imgs/icono-imprimir.gif'),
+ dict(name='img', src='/versions/1/imgs/icono-ampliar-letra.gif'),
+ dict(name='img', src='/versions/1/imgs/icono-reducir-letra.gif'),
+ dict(name='img', src='/versions/1/imgs/pix-trans.gif'),
+ dict(name='img', src='/versions/1/imgs/icono-buscador.gif'),
+ dict(name='img', src='/versions/1/imgs/separador-linea-azul.gif'),
+ dict(name='img', src=' /versions/1/imgs/separador-linea.gif'),
+ dict(name='a', text='Powered by Civinext Groupware - V. 2.0.3567.23706'),
+ dict(name='img', height='0')
]
extra_css = '''
@@ -95,8 +95,8 @@ class LaPrensa(BasicNewsRecipe):
soup.head.insert(0, mtag)
for item in soup.findAll(style=True):
del item['style']
- for item in soup.findAll(align="center"):
+ for item in soup.findAll(align='center'):
del item['align']
- for item in soup.findAll(bgcolor="ffffff"):
+ for item in soup.findAll(bgcolor='ffffff'):
del item['bgcolor']
return soup
diff --git a/recipes/le_canard_enchaine.recipe b/recipes/le_canard_enchaine.recipe
index 2da45db09f..a4ecd4862d 100644
--- a/recipes/le_canard_enchaine.recipe
+++ b/recipes/le_canard_enchaine.recipe
@@ -61,7 +61,7 @@ class LeCanardEnchaine(BasicNewsRecipe):
elif img and img.get('src'):
return 'https://boutique.lecanardenchaine.fr' + img['src']
- self.log.info('Aucune couverture trouvée, utilisation de l\'image par défaut')
+ self.log.info("Aucune couverture trouvée, utilisation de l'image par défaut")
return 'https://image.ausha.co/2x1H3rkhwjmSwAa8KzIFfcN0G9GxfJWY83UafXn8_400x400.jpeg'
except Exception:
self.log.exception('Erreur lors de la récupération de la couverture')
@@ -90,7 +90,7 @@ class LeCanardEnchaine(BasicNewsRecipe):
feeds = []
for section_title, section_url in self.SECTIONS.items():
- print(f"Exploration de la rubrique : {section_title}")
+ print(f'Exploration de la rubrique : {section_title}')
articles = []
try:
url = 'https://www.lecanardenchaine.fr' + section_url
@@ -119,10 +119,10 @@ class LeCanardEnchaine(BasicNewsRecipe):
if unique_articles:
feeds.append((section_title, unique_articles))
- print(f" {len(unique_articles)} articles trouvés")
+ print(f' {len(unique_articles)} articles trouvés')
except Exception as e:
- print(f"Erreur sur {section_title}: {str(e)}")
+ print(f'Erreur sur {section_title}: {str(e)}')
return feeds
diff --git a/recipes/le_gorafi.recipe b/recipes/le_gorafi.recipe
index ee3adcecce..494fba7c0c 100644
--- a/recipes/le_gorafi.recipe
+++ b/recipes/le_gorafi.recipe
@@ -13,7 +13,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class LeGorafi(BasicNewsRecipe):
title = u'Le GORAFI.fr'
__author__ = 'Malah, LAntoine'
- description = u'Depuis 1826, toute l\'information de sources contradictoires'
+ description = u"Depuis 1826, toute l'information de sources contradictoires"
oldest_article = 7
language = 'fr'
max_articles_per_feed = 100
@@ -54,7 +54,7 @@ class LeGorafi(BasicNewsRecipe):
soup = self.index_to_soup(article.url)
img = soup.select_one('#mvp-post-feat-img img')
return img['data-lazy-src']
- print("No cover found")
+ print('No cover found')
return None
def parse_feeds(self):
diff --git a/recipes/le_monde_diplomatique_fr.recipe b/recipes/le_monde_diplomatique_fr.recipe
index d2804a49a1..a07e9f78bf 100644
--- a/recipes/le_monde_diplomatique_fr.recipe
+++ b/recipes/le_monde_diplomatique_fr.recipe
@@ -104,7 +104,7 @@ class LeMondeDiplomatiqueSiteWeb(BasicNewsRecipe):
'url': absurl(feed_link['href']),
'description': description
})
- return [("La valise diplomatique", articles)]
+ return [('La valise diplomatique', articles)]
def parse_index_cartes(self):
articles = []
@@ -125,7 +125,7 @@ class LeMondeDiplomatiqueSiteWeb(BasicNewsRecipe):
'url': absurl(feed_link['href']),
'description': author
})
- return [("Cartes", articles)]
+ return [('Cartes', articles)]
def parse_feeds(self):
feeds = BasicNewsRecipe.parse_feeds(self)
diff --git a/recipes/le_monde_sub_paper.recipe b/recipes/le_monde_sub_paper.recipe
index 0b6a7de449..46b366a4d0 100644
--- a/recipes/le_monde_sub_paper.recipe
+++ b/recipes/le_monde_sub_paper.recipe
@@ -51,7 +51,7 @@ class LeMondeAbonne(BasicNewsRecipe):
zipurl_format = 'http://medias.lemonde.fr/abonnes/editionelectronique/%Y%m%d/html/%y%m%d.zip'
coverurl_format = '/img/%y%m%d01.jpg'
masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'
- path_format = "%y%m%d"
+ path_format = '%y%m%d'
keep_only_tags = [
dict(name=['h1']),
@@ -66,7 +66,7 @@ class LeMondeAbonne(BasicNewsRecipe):
dict(name='div', attrs={'class': 'po-copy'})
]
- article_id_pattern = re.compile("[0-9]+\\.html")
+ article_id_pattern = re.compile('[0-9]+\\.html')
article_url_format = 'http://www.lemonde.fr/journalelectronique/donnees/protege/%Y%m%d/html/'
def get_browser(self):
@@ -92,7 +92,7 @@ class LeMondeAbonne(BasicNewsRecipe):
for i in range(7):
self.ltime = time.gmtime(second)
- self.timefmt = time.strftime(" %A %d %B %Y",
+ self.timefmt = time.strftime(' %A %d %B %Y',
self.ltime).decode(preferred_encoding)
url = time.strftime(self.zipurl_format, self.ltime)
try:
@@ -113,7 +113,7 @@ class LeMondeAbonne(BasicNewsRecipe):
zfile.close()
path = os.path.join(
- self.output_dir, time.strftime(self.path_format, self.ltime), "data"
+ self.output_dir, time.strftime(self.path_format, self.ltime), 'data'
)
self.articles_path = path
@@ -121,7 +121,7 @@ class LeMondeAbonne(BasicNewsRecipe):
files = os.listdir(path)
nb_index_files = len([
- name for name in files if re.match("frame_gauche_[0-9]+.html", name)
+ name for name in files if re.match('frame_gauche_[0-9]+.html', name)
])
flux = []
@@ -129,39 +129,39 @@ class LeMondeAbonne(BasicNewsRecipe):
article_url = time.strftime(self.article_url_format, self.ltime)
for i in range(nb_index_files):
- filename = os.path.join(path, "selection_%d.html" % (i + 1))
+ filename = os.path.join(path, 'selection_%d.html' % (i + 1))
with open(filename, 'rb') as tmp:
soup = self.index_to_soup(tmp.read())
title = soup.find('span').contents[0]
- if title == "Une":
- title = "À la une"
- if title == "Evenement":
+ if title == 'Une':
+ title = 'À la une'
+ if title == 'Evenement':
title = "L'événement"
- if title == "Planete":
- title = "Planète"
- if title == "Economie - Entreprises":
- title = "Économie"
+ if title == 'Planete':
+ title = 'Planète'
+ if title == 'Economie - Entreprises':
+ title = 'Économie'
if title == "L'Oeil du Monde":
title = "L'œil du Monde"
- if title == "Enquete":
- title = "Enquête"
- if title == "Editorial - Analyses":
- title = "Horizons"
- if title == "Le Monde Economie":
- title = "Économie"
- if title == "Lettre et chronique":
- title = "Idées"
- if title == "Le Monde Géo et politique":
- title = "Géopolitique"
- if title == "Météo - Jeux - Ecrans":
- title = "Économie & Entreprise"
+ if title == 'Enquete':
+ title = 'Enquête'
+ if title == 'Editorial - Analyses':
+ title = 'Horizons'
+ if title == 'Le Monde Economie':
+ title = 'Économie'
+ if title == 'Lettre et chronique':
+ title = 'Idées'
+ if title == 'Le Monde Géo et politique':
+ title = 'Géopolitique'
+ if title == 'Météo - Jeux - Ecrans':
+ title = 'Économie & Entreprise'
tmp.close()
- filename = os.path.join(path, "frame_gauche_%d.html" % (i + 1))
+ filename = os.path.join(path, 'frame_gauche_%d.html' % (i + 1))
with open(filename, 'rb') as tmp:
soup = self.index_to_soup(tmp.read())
articles = []
- for link in soup.findAll("a"):
+ for link in soup.findAll('a'):
article_file = link['href']
article_id = self.article_id_pattern.search(article_file).group()
article = {
diff --git a/recipes/le_peuple_breton.recipe b/recipes/le_peuple_breton.recipe
index cad05cc0c5..ce6e126c26 100644
--- a/recipes/le_peuple_breton.recipe
+++ b/recipes/le_peuple_breton.recipe
@@ -10,7 +10,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class LePeupleBreton(BasicNewsRecipe):
title = 'Le Peuple Breton'
__author__ = 'Lionel Plais'
- description = u'Aujourd\'hui, être libre c\'est être informé'
+ description = u"Aujourd'hui, être libre c'est être informé"
oldest_article = 90
language = 'fr'
cover_img_url = 'http://lepeuplebreton.bzh/wp-content/uploads/2017/11/le-peuple-breton-logo.jpg'
diff --git a/recipes/leggo_it.recipe b/recipes/leggo_it.recipe
index 2ad79b1db8..bb8a6176a9 100644
--- a/recipes/leggo_it.recipe
+++ b/recipes/leggo_it.recipe
@@ -53,8 +53,8 @@ class LeggoIT(BasicNewsRecipe):
cover = None
st = time.localtime()
year = str(st.tm_year)
- month = "%.2d" % st.tm_mon
- day = "%.2d" % st.tm_mday
+ month = '%.2d' % st.tm_mon
+ day = '%.2d' % st.tm_mday
cover = 'http://www.leggo.it/' + year + month + day + '/jpeg/LEGGO_ROMA_1.jpg'
br = BasicNewsRecipe.get_browser(self)
try:
@@ -65,6 +65,6 @@ class LeggoIT(BasicNewsRecipe):
try:
br.open(cover)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = 'http://www.leggo.it/img/logo-leggo2.gif'
return cover
diff --git a/recipes/lemonde_dip.recipe b/recipes/lemonde_dip.recipe
index 2563c5cd1e..bc2f6614d4 100644
--- a/recipes/lemonde_dip.recipe
+++ b/recipes/lemonde_dip.recipe
@@ -30,13 +30,13 @@ class LeMondeDiplomatiqueEn(BasicNewsRecipe):
INDEX = PREFIX + strftime('%Y/%m/')
use_embedded_content = False
language = 'en'
- extra_css = """
+ extra_css = '''
body{font-family: "Luxi sans","Lucida sans","Lucida Grande",Lucida,"Lucida Sans Unicode",sans-serif}
.surtitre{font-size: 1.2em; font-variant: small-caps; margin-bottom: 0.5em}
.chapo{font-size: 1.2em; font-weight: bold; margin: 1em 0 0.5em}
.texte{font-family: Georgia,"Times New Roman",serif} h1{color: #990000}
.notes{border-top: 1px solid #CCCCCC; font-size: 0.9em; line-height: 1.4em}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/lepoint.recipe b/recipes/lepoint.recipe
index 47a59d7994..db8bfe6165 100644
--- a/recipes/lepoint.recipe
+++ b/recipes/lepoint.recipe
@@ -71,6 +71,6 @@ class lepoint(BasicNewsRecipe):
try:
br.open(masthead)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
masthead = None
return masthead
diff --git a/recipes/lexpress.recipe b/recipes/lexpress.recipe
index 773c22818f..a8ba0a30b0 100644
--- a/recipes/lexpress.recipe
+++ b/recipes/lexpress.recipe
@@ -15,7 +15,7 @@ def classes(classes):
class lepoint(BasicNewsRecipe):
- title = 'L\'express'
+ title = "L'express"
__author__ = 'calibre'
description = 'Actualités'
publisher = 'LExpress.fr'
@@ -73,6 +73,6 @@ class lepoint(BasicNewsRecipe):
try:
br.open(masthead)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
masthead = None
return masthead
diff --git a/recipes/liberation.recipe b/recipes/liberation.recipe
index c0f9702986..531b02a41f 100644
--- a/recipes/liberation.recipe
+++ b/recipes/liberation.recipe
@@ -78,9 +78,9 @@ class Liberation(BasicNewsRecipe):
title = 'Libération'
__author__ = 'unkn0wn'
description = (
- 'Libération est un quotidien d\'information libre, vigilant et engagé. L\'objectif de Libération est de '
+ "Libération est un quotidien d'information libre, vigilant et engagé. L'objectif de Libération est de "
'fournir une information complète et vérifiée, dans tous les domaines. Sans préjugés, ni complaisance, '
- 'ses enquêtes reportages et analyses s\'emploient à comprendre et à décrire l\'actualité et à révéler '
+ "ses enquêtes reportages et analyses s'emploient à comprendre et à décrire l'actualité et à révéler "
'les mutations des sociétés et des cultures.'
)
language = 'fr'
diff --git a/recipes/libertad_digital.recipe b/recipes/libertad_digital.recipe
index 55a90adba3..b3a571345b 100644
--- a/recipes/libertad_digital.recipe
+++ b/recipes/libertad_digital.recipe
@@ -22,10 +22,10 @@ class LibertadDigital(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'website'
masthead_url = 'http://s.libertaddigital.com/images/logo.gif'
- extra_css = """
+ extra_css = '''
body{font-family: Verdana,sans-serif }
img{margin-bottom: 0.4em; display:block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/livemint.recipe b/recipes/livemint.recipe
index 8cdbbf8bec..90c8e5a05b 100644
--- a/recipes/livemint.recipe
+++ b/recipes/livemint.recipe
@@ -42,7 +42,7 @@ class LiveMint(BasicNewsRecipe):
if 'MINT_FRONT_1' in x['src']:
return 'https://epaper.livemint.com' + x['src'].replace('-S', '')
- extra_css = """
+ extra_css = '''
img {margin:0 auto;}
.psTopLogoItem img, .ecologoStory { width:100; }
#img-cap {font-size:small; text-align:center;}
@@ -51,7 +51,7 @@ class LiveMint(BasicNewsRecipe):
}
em, blockquote {color:#202020;}
.moreAbout, .articleInfo, .metaData, .psTopicsHeading, .topicsTag, .auth {font-size:small;}
- """
+ '''
keep_only_tags = [
dict(
diff --git a/recipes/livescience.recipe b/recipes/livescience.recipe
index 2cc26e321e..5506d15fc7 100644
--- a/recipes/livescience.recipe
+++ b/recipes/livescience.recipe
@@ -5,8 +5,8 @@ from calibre.web.feeds.news import BasicNewsRecipe
class LiveScience(BasicNewsRecipe):
- title = "Live Science"
- description = "For the science geek in everyone! Stories on the latest findings from science journals and institutions. Sourced from livescience.com"
+ title = 'Live Science'
+ description = 'For the science geek in everyone! Stories on the latest findings from science journals and institutions. Sourced from livescience.com'
__author__ = 'yodha8'
language = 'en'
oldest_article = 7
diff --git a/recipes/lwn_free.recipe b/recipes/lwn_free.recipe
index 0a9c1d9993..7cc7c473a4 100644
--- a/recipes/lwn_free.recipe
+++ b/recipes/lwn_free.recipe
@@ -5,7 +5,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class LWNFree(BasicNewsRecipe):
- title = "LWN Linux Weekly News (Free)"
+ title = 'LWN Linux Weekly News (Free)'
language = 'en'
__author__ = 'yodha8'
description = "LWN is published every Thursday. This recipe skips current week's articles (subscriber-only) and pulls free articles from previous week."
@@ -18,7 +18,7 @@ class LWNFree(BasicNewsRecipe):
]
def parse_feeds(self):
- """Remove paid articles and articles older than a week."""
+ '''Remove paid articles and articles older than a week.'''
prev_feeds = super().parse_feeds()
@@ -28,12 +28,12 @@ class LWNFree(BasicNewsRecipe):
for article in prev_feeds[0]:
# Paid article
- if "[$]" in article.title:
+ if '[$]' in article.title:
remove_articles.append(article)
continue
# Count how many free weekly edition we passed
- if "Weekly Edition" in article.title:
+ if 'Weekly Edition' in article.title:
weekly_count += 1
# Remove all articles starting from 2nd free weekly edition
diff --git a/recipes/lwn_weekly.recipe b/recipes/lwn_weekly.recipe
index 8917cbc423..6867f22ccb 100644
--- a/recipes/lwn_weekly.recipe
+++ b/recipes/lwn_weekly.recipe
@@ -137,7 +137,7 @@ class WeeklyLWN(BasicNewsRecipe):
article_title = _('Undefined article title')
if subsection:
- section_title = "%s: %s" % (section, subsection)
+ section_title = '%s: %s' % (section, subsection)
else:
section_title = section
@@ -170,7 +170,7 @@ class WeeklyLWN(BasicNewsRecipe):
})
else:
- self.log.error("lwn_weekly.recipe: something bad happened; should not be able to reach this")
+ self.log.error('lwn_weekly.recipe: something bad happened; should not be able to reach this')
ans = [(section2, articles[section2])
for section2 in ans if section2 in articles]
diff --git a/recipes/mainichi.recipe b/recipes/mainichi.recipe
index 69c0159996..4eaa4f02e5 100644
--- a/recipes/mainichi.recipe
+++ b/recipes/mainichi.recipe
@@ -1,7 +1,7 @@
#!/usr/bin/env python
-"""
+'''
www.mainichi.jp
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe
@@ -9,11 +9,11 @@ from calibre.web.feeds.news import BasicNewsRecipe
class MainichiDailyNews(BasicNewsRecipe):
title = u'\u6bce\u65e5\u65b0\u805e'
__author__ = 'unkn0wn'
- description = "Japanese traditional newspaper Mainichi Daily News"
- publisher = "Mainichi News"
- publication_type = "newspaper"
- category = "news, japan"
- language = "ja"
+ description = 'Japanese traditional newspaper Mainichi Daily News'
+ publisher = 'Mainichi News'
+ publication_type = 'newspaper'
+ category = 'news, japan'
+ language = 'ja'
no_stylesheets = True
remove_javascript = True
diff --git a/recipes/mainichi_en.recipe b/recipes/mainichi_en.recipe
index ec74c962df..6f083554e3 100644
--- a/recipes/mainichi_en.recipe
+++ b/recipes/mainichi_en.recipe
@@ -1,23 +1,23 @@
#!/usr/bin/env python
-"""
+'''
www.mainichi.jp/english
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe
class MainichiEnglishNews(BasicNewsRecipe):
- title = u"The Mainichi"
+ title = u'The Mainichi'
__author__ = 'unkn0wn'
- description = "Japanese traditional newspaper Mainichi news in English"
- publisher = "Mainichi News"
- publication_type = "newspaper"
- category = "news, japan"
- language = "en_JP"
+ description = 'Japanese traditional newspaper Mainichi news in English'
+ publisher = 'Mainichi News'
+ publication_type = 'newspaper'
+ category = 'news, japan'
+ language = 'en_JP'
- index = "http://mainichi.jp/english/"
- masthead_url = index + "images/themainichi.png"
+ index = 'http://mainichi.jp/english/'
+ masthead_url = index + 'images/themainichi.png'
no_stylesheets = True
remove_javascript = True
diff --git a/recipes/mainichi_science_news.recipe b/recipes/mainichi_science_news.recipe
index d7ede8543b..381e145107 100644
--- a/recipes/mainichi_science_news.recipe
+++ b/recipes/mainichi_science_news.recipe
@@ -22,8 +22,8 @@ class MainichiDailyScienceNews(BasicNewsRecipe):
remove_javascript = True
masthead_title = u'MAINICHI DAILY NEWS'
- remove_tags_before = {'class': "NewsTitle"}
- remove_tags_after = {'class': "NewsBody clr"}
+ remove_tags_before = {'class': 'NewsTitle'}
+ remove_tags_after = {'class': 'NewsBody clr'}
def parse_feeds(self):
diff --git a/recipes/marca.recipe b/recipes/marca.recipe
index fd64c937f9..0235ce8ff4 100644
--- a/recipes/marca.recipe
+++ b/recipes/marca.recipe
@@ -23,12 +23,12 @@ class Marca(BasicNewsRecipe):
language = 'es'
publication_type = 'newsportal'
masthead_url = 'http://estaticos.marca.com/deporte/img/v3.0/img_marca-com.png'
- extra_css = """
+ extra_css = '''
body{font-family: Tahoma,Geneva,sans-serif}
h1,h2,h3,h4,h5,h6{font-family: 'LatoBlack',Tahoma,Geneva,sans-serif}
.cab_articulo h4 {font-family: Georgia,"Times New Roman",Times,serif}
.antetitulo{text-transform: uppercase}
- """
+ '''
feeds = [(u'Portada', u'http://estaticos.marca.com/rss/portada.xml')]
diff --git a/recipes/marctv.recipe b/recipes/marctv.recipe
index ca5cbe5f84..87890a95a9 100644
--- a/recipes/marctv.recipe
+++ b/recipes/marctv.recipe
@@ -26,7 +26,7 @@ class MarcTVde(BasicNewsRecipe):
remove_tags = []
- keep_only_tags = dict(name='div', attrs={'class': ["content"]})
+ keep_only_tags = dict(name='div', attrs={'class': ['content']})
feeds = [
(u'Spiele', u'http://feeds.feedburner.com/marctv/spiele'),
diff --git a/recipes/mediaindonesia.recipe b/recipes/mediaindonesia.recipe
index e282ac450d..4f5dd5b740 100644
--- a/recipes/mediaindonesia.recipe
+++ b/recipes/mediaindonesia.recipe
@@ -24,7 +24,7 @@ class Media(BasicNewsRecipe):
no_javascript = True
remove_tags = [dict(id=['atas', 'merah', 'putih']), dict(name='a')]
- remove_tags_after = [dict(id="putih")]
+ remove_tags_after = [dict(id='putih')]
extra_css = '''
.judul {font-size: x-large;}
diff --git a/recipes/mediapart.recipe b/recipes/mediapart.recipe
index 67e97aeb6e..166cea0044 100644
--- a/recipes/mediapart.recipe
+++ b/recipes/mediapart.recipe
@@ -49,7 +49,7 @@ class Mediapart(BasicNewsRecipe):
conversion_options = {'smarten_punctuation': True}
- masthead_url = "https://raw.githubusercontent.com/lhoupert/calibre_contrib/main/mediapart_masthead.png"
+ masthead_url = 'https://raw.githubusercontent.com/lhoupert/calibre_contrib/main/mediapart_masthead.png'
ignore_duplicate_articles = {'title'}
resolve_internal_links = True
diff --git a/recipes/merco_press.recipe b/recipes/merco_press.recipe
index 88d1eaaaa5..77433a188c 100644
--- a/recipes/merco_press.recipe
+++ b/recipes/merco_press.recipe
@@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class MercoPress(BasicNewsRecipe):
title = u'Merco Press'
- description = u"Read News, Stories and Insight Analysis from Latin America and Mercosur. Politics, Economy, Business and Investments in South America."
+ description = u'Read News, Stories and Insight Analysis from Latin America and Mercosur. Politics, Economy, Business and Investments in South America.'
cover_url = 'http://en.mercopress.com/web/img/en/mercopress-logo.gif'
__author__ = 'Russell Phillips'
diff --git a/recipes/mit_technology_review.recipe b/recipes/mit_technology_review.recipe
index dc14af12e2..1bc7b4d58c 100644
--- a/recipes/mit_technology_review.recipe
+++ b/recipes/mit_technology_review.recipe
@@ -19,7 +19,7 @@ def absurl(x):
if x.startswith('//'):
x = 'http:' + x
elif not x.startswith('http'):
- x = "http://www.technologyreview.com" + x
+ x = 'http://www.technologyreview.com' + x
return x
@@ -58,8 +58,8 @@ class MitTechnologyReview(BasicNewsRecipe):
prefixed_classes('contentHeader contentArticleHeader contentBody')
]
remove_tags = [
- dict(name="aside"),
- dict(name="svg"),
+ dict(name='aside'),
+ dict(name='svg'),
prefixed_classes(
'image__placeholder sliderAd__wrapper eyebrow__wrap-- screen-reader-text'
),
@@ -83,7 +83,7 @@ class MitTechnologyReview(BasicNewsRecipe):
if script := soup.find('script', id='preload'):
raw = script.contents[0]
m = re.search(r'\"children\":\[{\"name\":\"magazine-hero\"', raw)
- spl = re.split(r"(?=\{)", raw[m.start():], 1)[1]
+ spl = re.split(r'(?=\{)', raw[m.start():], 1)[1]
data = json.JSONDecoder().raw_decode(spl)[0]
self.cover_url = data['children'][0]['config']['src'] + '?fit=572,786'
self.timefmt = ' [' + data['config']['issueDate'] + ']'
@@ -94,7 +94,7 @@ class MitTechnologyReview(BasicNewsRecipe):
feeds = OrderedDict()
classNamePrefixes = [
- "magazineHero__letter--", "teaserItem__title", "teaserItem--aside__title"
+ 'magazineHero__letter--', 'teaserItem__title', 'teaserItem--aside__title'
]
for div in soup.findAll(
attrs={
diff --git a/recipes/mmc_rtv.recipe b/recipes/mmc_rtv.recipe
index 67bc888d90..d39266c3e5 100644
--- a/recipes/mmc_rtv.recipe
+++ b/recipes/mmc_rtv.recipe
@@ -10,7 +10,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class MMCRTV(BasicNewsRecipe):
title = u'MMC RTV Slovenija'
__author__ = u'TadejS'
- description = u"Prvi interaktivni multimedijski portal, MMC RTV Slovenija"
+ description = u'Prvi interaktivni multimedijski portal, MMC RTV Slovenija'
oldest_article = 3
max_articles_per_feed = 100
language = 'sl'
diff --git a/recipes/modoros.recipe b/recipes/modoros.recipe
index a934eb8427..a65499f08c 100644
--- a/recipes/modoros.recipe
+++ b/recipes/modoros.recipe
@@ -15,7 +15,7 @@ from hashlib import md5
class ModorosBlogHu(BasicNewsRecipe):
__author__ = 'Zsolt Botykai'
title = u'Modoros Blog'
- description = u"Modoros.blog.hu"
+ description = u'Modoros.blog.hu'
oldest_article = 10000
max_articles_per_feed = 10000
reverse_article_order = True
diff --git a/recipes/montreal_gazette.recipe b/recipes/montreal_gazette.recipe
index 6701af2093..cc3633e2a2 100644
--- a/recipes/montreal_gazette.recipe
+++ b/recipes/montreal_gazette.recipe
@@ -164,24 +164,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -262,10 +262,10 @@ class CanWestPaper(BasicNewsRecipe):
if url.startswith('/'):
url = self.url_prefix + url
if not url.startswith(self.url_prefix):
- print("Rejected " + url)
+ print('Rejected ' + url)
return
if url in self.url_list:
- print("Rejected dup " + url)
+ print('Rejected dup ' + url)
return
self.url_list.append(url)
title = self.tag_to_string(atag, False)
@@ -277,8 +277,8 @@ class CanWestPaper(BasicNewsRecipe):
return
dtag = adiv.find('div', 'content')
description = ''
- print("URL " + url)
- print("TITLE " + title)
+ print('URL ' + url)
+ print('TITLE ' + title)
if dtag is not None:
stag = dtag.span
if stag is not None:
@@ -286,18 +286,18 @@ class CanWestPaper(BasicNewsRecipe):
description = self.tag_to_string(stag, False)
else:
description = self.tag_to_string(dtag, False)
- print("DESCRIPTION: " + description)
+ print('DESCRIPTION: ' + description)
if key not in articles:
articles[key] = []
articles[key].append(dict(
title=title, url=url, date='', description=description, author='', content=''))
def parse_web_index(key, keyurl):
- print("Section: " + key + ': ' + self.url_prefix + keyurl)
+ print('Section: ' + key + ': ' + self.url_prefix + keyurl)
try:
soup = self.index_to_soup(self.url_prefix + keyurl)
except:
- print("Section: " + key + ' NOT FOUND')
+ print('Section: ' + key + ' NOT FOUND')
return
ans.append(key)
mainsoup = soup.find('div', 'bodywrapper')
diff --git a/recipes/nacional_cro.recipe b/recipes/nacional_cro.recipe
index 997e6903c1..97333d6c9b 100644
--- a/recipes/nacional_cro.recipe
+++ b/recipes/nacional_cro.recipe
@@ -23,7 +23,7 @@ def new_tag(soup, name, attrs=()):
class NacionalCro(BasicNewsRecipe):
title = 'Nacional - Hr'
__author__ = 'Darko Miletic'
- description = "news from Croatia"
+ description = 'news from Croatia'
publisher = 'Nacional.hr'
category = 'news, politics, Croatia'
oldest_article = 2
@@ -53,9 +53,9 @@ class NacionalCro(BasicNewsRecipe):
soup.html['lang'] = self.lang
soup.html['dir'] = self.direction
mlang = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Language"), ("content", self.lang)])
+ ('http-equiv', 'Content-Language'), ('content', self.lang)])
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=UTF-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=UTF-8')])
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
for item in soup.findAll(style=True):
diff --git a/recipes/natgeo.recipe b/recipes/natgeo.recipe
index f2c054aa92..047c23a544 100644
--- a/recipes/natgeo.recipe
+++ b/recipes/natgeo.recipe
@@ -44,14 +44,14 @@ class NatGeo(BasicNewsRecipe):
def preprocess_raw_html(self, raw_html, url):
return self.natgeo_parser.extract_html(raw_html)
- extra_css = """
+ extra_css = '''
blockquote { color:#404040; }
.byline, i { font-style:italic; color:#202020; }
.cap { font-size:small; }
img {display:block; margin:0 auto;}
.cred { font-style:italic; font-size:small; color:#404040; }
.auth, .time, .sub { font-size:small; color:#5c5c5c; }
- """
+ '''
def get_cover_url(self):
# soup = self.index_to_soup('https://www.nationalgeographic.com/magazine/')
diff --git a/recipes/natgeo_kids.recipe b/recipes/natgeo_kids.recipe
index 8884f8488d..813c4bce87 100644
--- a/recipes/natgeo_kids.recipe
+++ b/recipes/natgeo_kids.recipe
@@ -41,14 +41,14 @@ class NatGeo(BasicNewsRecipe):
def preprocess_raw_html(self, raw_html, url):
return self.natgeo_parser.extract_html(raw_html)
- extra_css = """
+ extra_css = '''
blockquote { color:#404040; }
.byline, i { font-style:italic; color:#202020; }
.cap { font-size:small; }
img {display:block; margin:0 auto;}
.cred { font-style:italic; font-size:small; color:#404040; }
.auth, .time, .sub { font-size:small; color:#5c5c5c; }
- """
+ '''
def parse_index(self):
index = 'https://kids.nationalgeographic.com/'
diff --git a/recipes/natgeo_traveller.recipe b/recipes/natgeo_traveller.recipe
index eed9e5a0c2..8500b2d8ab 100644
--- a/recipes/natgeo_traveller.recipe
+++ b/recipes/natgeo_traveller.recipe
@@ -44,14 +44,14 @@ class NatGeo(BasicNewsRecipe):
def preprocess_raw_html(self, raw_html, url):
return self.natgeo_parser.extract_html(raw_html)
- extra_css = """
+ extra_css = '''
blockquote { color:#404040; }
.byline, i { font-style:italic; color:#202020; }
.cap { font-size:small; }
img {display:block; margin:0 auto;}
.cred { font-style:italic; font-size:small; color:#404040; }
.auth, .time, .sub { font-size:small; color:#5c5c5c; }
- """
+ '''
def parse_index(self):
pages = [
diff --git a/recipes/natgeohis.recipe b/recipes/natgeohis.recipe
index 61012a18e1..bc56c2be3e 100644
--- a/recipes/natgeohis.recipe
+++ b/recipes/natgeohis.recipe
@@ -43,14 +43,14 @@ class NatGeo(BasicNewsRecipe):
def preprocess_raw_html(self, raw_html, url):
return self.natgeo_parser.extract_html(raw_html)
- extra_css = """
+ extra_css = '''
blockquote { color:#404040; }
.byline, i { font-style:italic; color:#202020; }
.cap { font-size:small; }
img {display:block; margin:0 auto;}
.cred { font-style:italic; font-size:small; color:#404040; }
.auth, .time, .sub { font-size:small; color:#5c5c5c; }
- """
+ '''
def get_cover_url(self):
soup = self.index_to_soup('https://ngsingleissues.nationalgeographic.com/history')
diff --git a/recipes/natgeomag.recipe b/recipes/natgeomag.recipe
index 2826d8c1a0..d2ec0f3f49 100644
--- a/recipes/natgeomag.recipe
+++ b/recipes/natgeomag.recipe
@@ -48,14 +48,14 @@ class NatGeo(BasicNewsRecipe):
def preprocess_raw_html(self, raw_html, url):
return self.natgeo_parser.extract_html(raw_html)
- extra_css = """
+ extra_css = '''
blockquote { color:#404040; }
.byline, i { font-style:italic; color:#202020; }
.cap { font-size:small; }
img {display:block; margin:0 auto;}
.cred { font-style:italic; font-size:small; color:#404040; }
.auth, .time, .sub { font-size:small; color:#5c5c5c; }
- """
+ '''
def parse_index(self):
edition = date.today().strftime('%B-%Y')
diff --git a/recipes/nature.recipe b/recipes/nature.recipe
index 02c70c211a..1484c44903 100644
--- a/recipes/nature.recipe
+++ b/recipes/nature.recipe
@@ -40,14 +40,14 @@ class Nature(BasicNewsRecipe):
no_javascript = True
no_stylesheets = True
- keep_only_tags = [dict(name="article")]
+ keep_only_tags = [dict(name='article')]
remove_tags = [
classes(
- "u-hide-print hide-print c-latest-content__item c-context-bar "
- "c-pdf-button__container u-js-hide"
+ 'u-hide-print hide-print c-latest-content__item c-context-bar '
+ 'c-pdf-button__container u-js-hide'
),
- dict(name="img", attrs={"class": ["visually-hidden"]}),
+ dict(name='img', attrs={'class': ['visually-hidden']}),
]
def parse_index(self):
@@ -56,15 +56,15 @@ class Nature(BasicNewsRecipe):
'img', attrs={'data-test': check_words('issue-cover-image')}
)['src']
try:
- self.cover_url = re.sub(r"\bw\d+\b", "w1000", self.cover_url) # enlarge cover size resolution
+ self.cover_url = re.sub(r'\bw\d+\b', 'w1000', self.cover_url) # enlarge cover size resolution
except:
- """
+ '''
failed, img src might have changed, use default width 200
- """
+ '''
pass
section_tags = soup.find_all(
- "section", attrs={"data-container-type": "issue-section-list"}
+ 'section', attrs={'data-container-type': 'issue-section-list'}
)
sections = defaultdict(list)
diff --git a/recipes/nautilus.recipe b/recipes/nautilus.recipe
index 7ee8788b08..ae310fe111 100644
--- a/recipes/nautilus.recipe
+++ b/recipes/nautilus.recipe
@@ -1,7 +1,7 @@
#!/usr/bin/env python
-"""
+'''
nautil.us
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe, classes
@@ -23,14 +23,14 @@ class Nautilus(BasicNewsRecipe):
remove_attributes = ['height', 'width']
ignore_duplicate_articles = {'title', 'url'}
remove_empty_feeds = True
- extra_css = """
+ extra_css = '''
.article-list_item-byline{font-size:small;}
blockquote{color:#404040; text-align:center;}
#fig-c{font-size:small;}
em{color:#202020;}
.breadcrumb{color:gray; font-size:small;}
.article-author{font-size:small;}
- """
+ '''
recipe_specific_options = {
'days': {
diff --git a/recipes/new_scientist.recipe b/recipes/new_scientist.recipe
index ccbc2fc73d..0f9c5f06eb 100644
--- a/recipes/new_scientist.recipe
+++ b/recipes/new_scientist.recipe
@@ -101,7 +101,7 @@ class NewScientist(BasicNewsRecipe):
br = BasicNewsRecipe.get_browser(self)
if self.username is not None and self.password is not None:
def is_login_form(form):
- return "action" in form.attrs and form.attrs['action'] == "/login/"
+ return 'action' in form.attrs and form.attrs['action'] == '/login/'
br.open('https://www.newscientist.com/login/')
br.select_form(predicate=is_login_form)
diff --git a/recipes/new_scientist_mag.recipe b/recipes/new_scientist_mag.recipe
index 9aa04e4ecd..49fe63b97d 100644
--- a/recipes/new_scientist_mag.recipe
+++ b/recipes/new_scientist_mag.recipe
@@ -40,7 +40,7 @@ class NewScientist(BasicNewsRecipe):
br = BasicNewsRecipe.get_browser(self)
if self.username is not None and self.password is not None:
def is_login_form(form):
- return "action" in form.attrs and form.attrs['action'] == "/login/"
+ return 'action' in form.attrs and form.attrs['action'] == '/login/'
br.open('https://www.newscientist.com/login/')
br.select_form(predicate=is_login_form)
diff --git a/recipes/new_statesman.recipe b/recipes/new_statesman.recipe
index 99b41b8765..86ecc7b35f 100644
--- a/recipes/new_statesman.recipe
+++ b/recipes/new_statesman.recipe
@@ -28,10 +28,10 @@ class NewStatesman(BasicNewsRecipe):
ignore_duplicate_articles = {'url'}
masthead_url = 'https://www.newstatesman.com/sites/all/themes/creative-responsive-theme/images/newstatesman_logo@2x.png'
- extra_css = """
+ extra_css = '''
body{font-family: serif}
img{margin-top:1em; margin-bottom: 1em; display:block}
- """
+ '''
conversion_options = {
'comment': description,
diff --git a/recipes/new_yorker.recipe b/recipes/new_yorker.recipe
index 6d8b49f863..b190c6a33c 100644
--- a/recipes/new_yorker.recipe
+++ b/recipes/new_yorker.recipe
@@ -16,7 +16,7 @@ def absurl(x):
class NewYorker(BasicNewsRecipe):
- title = "The New Yorker Magazine"
+ title = 'The New Yorker Magazine'
description = "Articles of the week's New Yorker magazine"
language = 'en_US'
__author__ = 'Kovid Goyal'
@@ -99,9 +99,9 @@ class NewYorker(BasicNewsRecipe):
self.log('Found cover:', self.cover_url)
try:
# the src original resolution w_280 was too low, replace w_280 with w_560
- cover_url_width_index = self.cover_url.find("w_")
+ cover_url_width_index = self.cover_url.find('w_')
old_width = self.cover_url[cover_url_width_index:cover_url_width_index+5]
- self.cover_url = self.cover_url.replace(old_width, "w_640")
+ self.cover_url = self.cover_url.replace(old_width, 'w_640')
except Exception:
self.log('Failed enlarging cover img, using the original one')
@@ -126,7 +126,7 @@ class NewYorker(BasicNewsRecipe):
if rub:
desc = self.tag_to_string(rub) + ' | ' + desc
self.log('\t', title, '\n\t', desc, '\n\t\t', url)
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
return feeds_dict.items()
diff --git a/recipes/newrepublicmag.recipe b/recipes/newrepublicmag.recipe
index 61658d1b1c..8e66503405 100644
--- a/recipes/newrepublicmag.recipe
+++ b/recipes/newrepublicmag.recipe
@@ -1,6 +1,6 @@
-"""
+'''
newrepublic.com
-"""
+'''
import json
from functools import cmp_to_key
from urllib.parse import urlencode, urljoin, urlparse, urlsplit
@@ -10,16 +10,16 @@ from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.utils.date import parse_date
from calibre.web.feeds.news import BasicNewsRecipe
-_issue_url = "" # example: https://newrepublic.com/magazine/may-2023
+_issue_url = '' # example: https://newrepublic.com/magazine/may-2023
def sort_section(a, b, sections_sort):
try:
- a_index = sections_sort.index(a["section"])
+ a_index = sections_sort.index(a['section'])
except ValueError:
a_index = 999
try:
- b_index = sections_sort.index(b["section"])
+ b_index = sections_sort.index(b['section'])
except ValueError:
b_index = 999
@@ -27,31 +27,31 @@ def sort_section(a, b, sections_sort):
return -1
if a_index > b_index:
return 1
- if a["section"] == b["section"]:
- return -1 if a["date"] < b["date"] else 1
- return -1 if a["section"] < b["section"] else 1
+ if a['section'] == b['section']:
+ return -1 if a['date'] < b['date'] else 1
+ return -1 if a['section'] < b['section'] else 1
class NewRepublicMagazine(BasicNewsRecipe):
- title = "The New Republic Magazine"
- language = "en"
- __author__ = "ping"
+ title = 'The New Republic Magazine'
+ language = 'en'
+ __author__ = 'ping'
description = (
- "Founded in 1914, The New Republic is a media organization dedicated to addressing "
- "today’s most critical issues. https://newrepublic.com/magazine"
+ 'Founded in 1914, The New Republic is a media organization dedicated to addressing '
+ 'today’s most critical issues. https://newrepublic.com/magazine'
)
- publication_type = "magazine"
+ publication_type = 'magazine'
use_embedded_content = False
- masthead_url = "https://images.newrepublic.com/f5acdc0030e3212e601040dd24d5c2c0c684b15f.png?w=512&q=65&dpi=1&fit=crop&crop=faces&h=256"
- remove_attributes = ["height", "width"]
- ignore_duplicate_articles = {"title", "url"}
+ masthead_url = 'https://images.newrepublic.com/f5acdc0030e3212e601040dd24d5c2c0c684b15f.png?w=512&q=65&dpi=1&fit=crop&crop=faces&h=256'
+ remove_attributes = ['height', 'width']
+ ignore_duplicate_articles = {'title', 'url'}
remove_empty_feeds = True
compress_news_images_auto_size = 6
requires_version = (5, 0, 0)
- BASE_URL = "https://newrepublic.com"
+ BASE_URL = 'https://newrepublic.com'
- extra_css = """
+ extra_css = '''
h1.headline { margin-bottom: 0.4rem; }
h2.subheadline { font-style: italic; margin-bottom: 1rem; font-weight: normal; }
.article-meta { margin-bottom: 1rem; }
@@ -64,15 +64,15 @@ class NewRepublicMagazine(BasicNewsRecipe):
}
.lede-media .caption, .article-embed .caption { font-size: 0.8rem; }
div.author-bios { margin-top: 2rem; font-style: italic; border-top: solid 1px dimgray; }
- """
+ '''
def _article_endpoint(self, nid):
- """
+ '''
Graphql endpoint to fetch full article
:param nid:
:return:
- """
- query = """
+ '''
+ query = '''
query ($id: ID, $nid: ID) {
Article(id: $id, nid: $nid) {
...ArticlePageFields
@@ -157,12 +157,12 @@ fragment ArticlePageFields on Article {
slug
label
}
-}"""
- params = {"query": query, "variables": json.dumps({"nid": str(nid)})}
- return f"https://newrepublic.com/graphql?{urlencode(params)}"
+}'''
+ params = {'query': query, 'variables': json.dumps({'nid': str(nid)})}
+ return f'https://newrepublic.com/graphql?{urlencode(params)}'
def _resize_image(self, image_url, width, height):
- """
+ '''
Rewrite the image url to fetch a device appropriate sized one instead
of the full-res one
@@ -170,76 +170,76 @@ fragment ArticlePageFields on Article {
:param width:
:param height:
:return:
- """
+ '''
crop_params = {
- "auto": "compress",
- "ar": f"{width}:{height}",
- "fm": "jpg",
- "fit": "crop",
- "crop": "faces",
- "ixlib": "react-9.0.2",
- "dpr": 1,
- "q": 65,
- "w": self.scale_news_images[0] if self.scale_news_images else 800,
+ 'auto': 'compress',
+ 'ar': f'{width}:{height}',
+ 'fm': 'jpg',
+ 'fit': 'crop',
+ 'crop': 'faces',
+ 'ixlib': 'react-9.0.2',
+ 'dpr': 1,
+ 'q': 65,
+ 'w': self.scale_news_images[0] if self.scale_news_images else 800,
}
url_tuple = urlsplit(image_url)
- return f"{url_tuple.scheme}://{url_tuple.netloc}{url_tuple.path}?{urlencode(crop_params)}"
+ return f'{url_tuple.scheme}://{url_tuple.netloc}{url_tuple.path}?{urlencode(crop_params)}'
def populate_article_metadata(self, article, soup, first):
# pick up the og link from preprocess_raw_html() and set it as url instead of the api endpoint
- og_link = soup.select("[data-og-link]")
+ og_link = soup.select('[data-og-link]')
if og_link:
- article.url = og_link[0]["data-og-link"]
+ article.url = og_link[0]['data-og-link']
def preprocess_raw_html(self, raw_html, url):
# formulate the api response into html
- article = json.loads(raw_html)["data"]["Article"]
+ article = json.loads(raw_html)['data']['Article']
# Example: 2022-08-12T10:00:00.000Z
- date_published_loc = parse_date(article["publishedAt"])
+ date_published_loc = parse_date(article['publishedAt'])
# authors
- author_bios_html = ""
+ author_bios_html = ''
post_authors = []
try:
- post_authors = [a["name"] for a in article.get("authors", [])]
+ post_authors = [a['name'] for a in article.get('authors', [])]
if post_authors:
- author_bios_html = "".join(
- [a.get("blurb", "") for a in article.get("authors", [])]
+ author_bios_html = ''.join(
+ [a.get('blurb', '') for a in article.get('authors', [])]
)
author_bios_html = f'<div class="author-bios">{author_bios_html}</div>'
except (KeyError, TypeError):
pass
# lede image
- lede_image_html = ""
- if article.get("ledeImage"):
- img = article["ledeImage"]
+ lede_image_html = ''
+ if article.get('ledeImage'):
+ img = article['ledeImage']
lede_img_url = self._resize_image(
- urljoin(self.BASE_URL, img["src"]), img["width"], img["height"]
+ urljoin(self.BASE_URL, img['src']), img['width'], img['height']
)
- lede_image_caption = ""
- if article.get("ledeImageRealCaption"):
+ lede_image_caption = ''
+ if article.get('ledeImageRealCaption'):
lede_image_caption = (
f'<span class="caption">{article["ledeImageRealCaption"]}</span>'
)
- lede_image_html = f"""
+ lede_image_html = f'''
{lede_image_caption}
-
"""
+
'''
- body_soup = BeautifulSoup(article["body"], features="html.parser")
- for img in body_soup.find_all("img", attrs={"data-serialized": True}):
+ body_soup = BeautifulSoup(article['body'], features='html.parser')
+ for img in body_soup.find_all('img', attrs={'data-serialized': True}):
try:
- img_info = json.loads(img["data-serialized"])
+ img_info = json.loads(img['data-serialized'])
img_src = self._resize_image(
- urljoin(self.BASE_URL, img_info["src"]),
- img_info["width"],
- img_info["height"],
+ urljoin(self.BASE_URL, img_info['src']),
+ img_info['width'],
+ img_info['height'],
)
- img["src"] = img_src
- del img["data-serialized"]
+ img['src'] = img_src
+ del img['data-serialized']
except:
pass
- return f"""
+ return f'''
{article["cleanTitle"]}
@@ -255,34 +255,34 @@ fragment ArticlePageFields on Article {
{str(body_soup)}
{author_bios_html}
- """
+ '''
def parse_index(self):
br = self.get_browser()
- params = ""
+ params = ''
if _issue_url:
- month = urlparse(_issue_url).path.split("/")[-1]
+ month = urlparse(_issue_url).path.split('/')[-1]
params = f'?{urlencode({"magazineTag": month})}'
- res = br.open_novisit(f"https://newrepublic.com/api/content/magazine{params}")
- magazine = json.loads(res.read().decode("utf-8"))["data"]
+ res = br.open_novisit(f'https://newrepublic.com/api/content/magazine{params}')
+ magazine = json.loads(res.read().decode('utf-8'))['data']
self.log.debug(f'Found issue: {magazine["metaData"]["issueTag"]["text"]}')
self.timefmt = f': {magazine["metaData"]["issueTag"]["text"]}'
- self.cover_url = urljoin(self.BASE_URL, magazine["metaData"]["image"]["src"])
+ self.cover_url = urljoin(self.BASE_URL, magazine['metaData']['image']['src'])
feed_articles = []
for k, articles in magazine.items():
- if not (k.startswith("magazine") and articles):
+ if not (k.startswith('magazine') and articles):
continue
try:
for article in articles:
self.log.debug(f'Found article: {article["title"]}')
feed_articles.append(
{
- "url": self._article_endpoint(article["nid"]),
- "title": article["title"].replace("\n", " "),
- "description": article.get("deck", ""),
- "date": article["publishedAt"],
- "section": k[len("magazine") :],
+ 'url': self._article_endpoint(article['nid']),
+ 'title': article['title'].replace('\n', ' '),
+ 'description': article.get('deck', ''),
+ 'date': article['publishedAt'],
+ 'section': k[len('magazine') :],
}
)
except TypeError:
@@ -290,24 +290,24 @@ fragment ArticlePageFields on Article {
pass
sort_sections = [
- "Cover",
- "Editorsnote",
- "Features",
- "StateOfTheNation",
- "ResPublica",
- "Columns",
- "Upfront",
- "Backstory",
- "SignsAndWonders",
- "Usandtheworld",
- "Booksandthearts",
- "Poetry",
- "Exposure",
+ 'Cover',
+ 'Editorsnote',
+ 'Features',
+ 'StateOfTheNation',
+ 'ResPublica',
+ 'Columns',
+ 'Upfront',
+ 'Backstory',
+ 'SignsAndWonders',
+ 'Usandtheworld',
+ 'Booksandthearts',
+ 'Poetry',
+ 'Exposure',
]
sort_category_key = cmp_to_key(lambda a, b: sort_section(a, b, sort_sections))
return [
(
- magazine["metaData"]["issueTag"]["text"],
+ magazine['metaData']['issueTag']['text'],
sorted(feed_articles, key=sort_category_key),
)
]
diff --git a/recipes/news24.recipe b/recipes/news24.recipe
index 94b9979ba2..a5c4e77d06 100644
--- a/recipes/news24.recipe
+++ b/recipes/news24.recipe
@@ -3,7 +3,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class AdvancedUserRecipe1375900744(BasicNewsRecipe):
title = u'News24'
- description = "News24."
+ description = 'News24.'
__author__ = 'Nicki de Wet'
publisher = 'Media24'
category = 'news, politics, South Africa'
@@ -17,10 +17,10 @@ class AdvancedUserRecipe1375900744(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newsportal'
masthead_url = 'http://www.24.com/images/widgethead_news.png'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif }
img{display: block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/news_busters.recipe b/recipes/news_busters.recipe
index 7d8ab2d80e..dea3e7ad17 100644
--- a/recipes/news_busters.recipe
+++ b/recipes/news_busters.recipe
@@ -7,7 +7,7 @@ class NewsBusters(BasicNewsRecipe):
__author__ = 'jde'
oldest_article = 1 # day
max_articles_per_feed = 100
- cover_url = "http://newsbusters.org/sites/all/themes/genesis_nb/images/nb-mrc.png"
+ cover_url = 'http://newsbusters.org/sites/all/themes/genesis_nb/images/nb-mrc.png'
language = 'en'
encoding = 'utf8'
needs_subscription = False
diff --git a/recipes/newsweek_polska.recipe b/recipes/newsweek_polska.recipe
index af69641762..ce34da87eb 100644
--- a/recipes/newsweek_polska.recipe
+++ b/recipes/newsweek_polska.recipe
@@ -71,31 +71,31 @@ class Newsweek(BasicNewsRecipe):
strong = p.find('strong')
if strong:
newest = re.compile(
- "Tekst pochodzi z najnowszego numeru Tygodnika Newsweek")
+ 'Tekst pochodzi z najnowszego numeru Tygodnika Newsweek')
if newest.search(str(strong)):
strong.extract()
continue
itunes = p.find('a')
if itunes:
- reurl = re.compile("itunes.apple.com")
+ reurl = re.compile('itunes.apple.com')
if reurl.search(str(itunes['href'])):
p.extract()
continue
imagedesc = p.find('div', attrs={'class': 'image-desc'})
if imagedesc:
- redesc = re.compile("Okładka numeru")
+ redesc = re.compile('Okładka numeru')
if (redesc.search(str(imagedesc))):
p.extract()
continue
# get actual contents
for content in article_div.contents:
- strs.append("".join(str(content)))
+ strs.append(''.join(str(content)))
# return contents as a string
- return u"".join(strs)
+ return u''.join(strs)
#
# Articles can be divided into several pages, this method parses them recursevely
@@ -108,7 +108,7 @@ class Newsweek(BasicNewsRecipe):
matches = re.search(r'<article>(.*)</article>', source, re.DOTALL)
if matches is None:
- print("no article tag found, returning...")
+ print('no article tag found, returning...')
return
main_section = BeautifulSoup(matches.group(0))
diff --git a/recipes/nezavisne_novine.recipe b/recipes/nezavisne_novine.recipe
index 9ab93d928d..11b8da10af 100644
--- a/recipes/nezavisne_novine.recipe
+++ b/recipes/nezavisne_novine.recipe
@@ -24,10 +24,10 @@ class NezavisneNovine(BasicNewsRecipe):
cover_url = strftime(
'http://pdf.nezavisne.com/slika/novina/nezavisne_novine.jpg?v=%Y%m%d')
masthead_url = 'http://www.nezavisne.com/slika/osnova/nezavisne-novine-logo.gif'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif }
img{margin-bottom: 0.4em; display:block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/nikkei_news.recipe b/recipes/nikkei_news.recipe
index d193deaf06..28dfdff1c9 100644
--- a/recipes/nikkei_news.recipe
+++ b/recipes/nikkei_news.recipe
@@ -26,67 +26,67 @@ class NikkeiNet_paper_subscription(BasicNewsRecipe):
masthead_url = 'http://cdn.nikkei.co.jp/parts/ds/images/common/st_nikkei_r1_20101003_1.gif'
cover_margins = (10, 188, '#ffffff')
- remove_tags_before = {'class': "cmn-indent"}
+ remove_tags_before = {'class': 'cmn-indent'}
remove_tags = [
# {'class':"cmn-article_move"},
# {'class':"cmn-pr_list"},
# {'class':"cmnc-zoom"},
- {'class': "cmn-hide"},
+ {'class': 'cmn-hide'},
{'name': 'form'},
{'class': 'cmn-print_headline cmn-clearfix'},
{'id': 'ABOUT_NIKKEI'},
]
- remove_tags_after = {'class': "cmn-indent"}
+ remove_tags_after = {'class': 'cmn-indent'}
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
if self.username is not None and self.password is not None:
- print("-------------------------open top page-------------------------------------")
+ print('-------------------------open top page-------------------------------------')
br.open('http://www.nikkei.com/')
- print("-------------------------open first login form-----------------------------")
+ print('-------------------------open first login form-----------------------------')
try:
url = list(br.links(
- url_regex="www.nikkei.com/etc/accounts/login"))[0].url
+ url_regex='www.nikkei.com/etc/accounts/login'))[0].url
except IndexError:
- print("Found IndexError")
+ print('Found IndexError')
url = 'http://www.nikkei.com/etc/accounts/login?dps=3&pageflag=top&url=http%3A%2F%2Fwww.nikkei.com%2F'
except StopIteration:
url = 'http://www.nikkei.com/etc/accounts/login?dps=3&pageflag=top&url=http%3A%2F%2Fwww.nikkei.com%2F'
br.open(url)
- print("-------------------------JS redirect(send autoPostForm)--------------------")
+ print('-------------------------JS redirect(send autoPostForm)--------------------')
br.select_form(name='autoPostForm')
br.submit()
# response = br.response()
- print("-------------------------got login form------------------------------------")
+ print('-------------------------got login form------------------------------------')
br.select_form(name='LA7010Form01')
br['LA7010Form01:LA7010Email'] = self.username
br['LA7010Form01:LA7010Password'] = self.password
br.submit(id='LA7010Form01:submitBtn')
- print("-------------------------JS redirect---------------------------------------")
+ print('-------------------------JS redirect---------------------------------------')
br.select_form(nr=0)
br.submit()
return br
def cleanup(self):
- print("-------------------------logout--------------------------------------------")
+ print('-------------------------logout--------------------------------------------')
self.browser.open('https://regist.nikkei.com/ds/etc/accounts/logout')
def parse_index(self):
- print("-------------------------get index of paper--------------------------------")
+ print('-------------------------get index of paper--------------------------------')
result = []
soup = self.index_to_soup('http://www.nikkei.com/paper/')
- sections = soup.findAll(attrs={'class': re.compile(".*cmn-article_title.*")})
+ sections = soup.findAll(attrs={'class': re.compile('.*cmn-article_title.*')})
for sect in sections:
- sect_title = sect.find(attrs={'class' : re.compile(".*cmnc-((large)|(middle)|(small)).*")})
+ sect_title = sect.find(attrs={'class' : re.compile('.*cmnc-((large)|(middle)|(small)).*')})
if sect_title is None:
continue
sect_title = sect_title.contents[0]
sect_result = []
url = sect.a['href']
- url = re.sub("/article/", "/print-article/", url)
+ url = re.sub('/article/', '/print-article/', url)
url = 'http://www.nikkei.com' + url
sect_result.append(dict(title=sect_title, url=url, date='',description='', content=''))
result.append([sect_title, sect_result])
@@ -95,11 +95,11 @@ class NikkeiNet_paper_subscription(BasicNewsRecipe):
def populate_article_metadata(self, article, soup, first):
try:
elms = soup.findAll(
- 'div', {"class": "cmn-article_text JSID_key_fonttxt"})
+ 'div', {'class': 'cmn-article_text JSID_key_fonttxt'})
elm_text = u'◆'.join(
[self.tag_to_string(elm).strip() for elm in elms])
elm_text = unicodedata.normalize('NFKC', elm_text)
article.summary = article.text_summary = elm_text
except:
- self.log("Error: Failed to get article summary.")
+ self.log('Error: Failed to get article summary.')
return
diff --git a/recipes/nikkeiasia.recipe b/recipes/nikkeiasia.recipe
index c9c55c88b4..5a725bb808 100644
--- a/recipes/nikkeiasia.recipe
+++ b/recipes/nikkeiasia.recipe
@@ -29,12 +29,12 @@ class Nikkei(BasicNewsRecipe):
encoding = 'utf-8'
use_embedded_content = False
- extra_css = """
+ extra_css = '''
.subhead { font-style:italic; color:#202020; }
em, blockquote { color:#202020; }
.sec, .byline { font-size:small; font-weight:bold; }
.article__image, .article__caption { font-size:small; text-align:center; }
- """
+ '''
recipe_specific_options = {
'date': {'short': 'The edition date (YYYY-MM-DD format)', 'long': '2024-09-19'}
diff --git a/recipes/njuz_net.recipe b/recipes/njuz_net.recipe
index 7db58348d5..bd9ae24395 100644
--- a/recipes/njuz_net.recipe
+++ b/recipes/njuz_net.recipe
@@ -23,12 +23,12 @@ class NjuzNet(BasicNewsRecipe):
language = 'sr'
publication_type = 'newsportal'
masthead_url = 'http://www.njuz.net/njuznet.jpg'
- extra_css = """
+ extra_css = '''
@font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
body{font-family: serif1, serif}
.articledescription{font-family: serif1, serif}
.wp-caption-text{font-size: x-small}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/novilist_novine_hr.recipe b/recipes/novilist_novine_hr.recipe
index 6939447607..eeb3c1e6f6 100644
--- a/recipes/novilist_novine_hr.recipe
+++ b/recipes/novilist_novine_hr.recipe
@@ -27,14 +27,14 @@ class NoviList_hr(BasicNewsRecipe):
needs_subscription = True
masthead_url = 'http://novine.novilist.hr/images/system/novilist-logo.jpg'
index = 'http://novine.novilist.hr/'
- extra_css = """
+ extra_css = '''
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: Geneva,Arial,Helvetica,Swiss,sans1,sans-serif }
img{display:block; margin-bottom: 0.4em; margin-top: 0.4em}
.nadnaslov,.podnaslov{font-size: small; display: block; margin-bottom: 1em}
.naslov{font-size: x-large; color: maroon; font-weight: bold; display: block; margin-bottom: 1em;}
p{display: block}
- """
+ '''
preprocess_regexps = [(re.compile(u'\u0110'), lambda match: u'\u00D0')]
diff --git a/recipes/novosti.recipe b/recipes/novosti.recipe
index e889a3fc46..2c926ab51b 100644
--- a/recipes/novosti.recipe
+++ b/recipes/novosti.recipe
@@ -24,12 +24,12 @@ class Novosti(BasicNewsRecipe):
language = 'sr'
publication_type = 'newspaper'
masthead_url = 'http://www.novosti.rs/images/basic/logo-print.png'
- extra_css = """ @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
+ extra_css = ''' @font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
.article_description,body{font-family: Arial,Helvetica,sans1,sans-serif}
.author{font-size: small}
.articleLead{font-size: large; font-weight: bold}
img{display: block; margin-bottom: 1em; margin-top: 1em}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language, 'pretty_print': True
diff --git a/recipes/nrc.nl.recipe b/recipes/nrc.nl.recipe
index a03fb6269e..4f5b29fffc 100644
--- a/recipes/nrc.nl.recipe
+++ b/recipes/nrc.nl.recipe
@@ -39,7 +39,7 @@ class NRC(BasicNewsRecipe):
),
dict(name=['script', 'noscript', 'style']),
]
- remove_attributes = ["class", "id", "name", "style"]
+ remove_attributes = ['class', 'id', 'name', 'style']
encoding = 'utf-8'
no_stylesheets = True
ignore_duplicate_articles = {'url'}
@@ -52,8 +52,8 @@ class NRC(BasicNewsRecipe):
title_regexp = None
@staticmethod
- def _monthly_list_url(date, fmt="%Y/%m/"):
- return "https://www.nrc.nl/de/data/NH/" + date.strftime(fmt)
+ def _monthly_list_url(date, fmt='%Y/%m/'):
+ return 'https://www.nrc.nl/de/data/NH/' + date.strftime(fmt)
def _clean_article_title(self, title):
if not title:
@@ -62,7 +62,7 @@ class NRC(BasicNewsRecipe):
self.title_regexp = re.compile(
r'([^<]+)\s*'
)
- return self.title_regexp.sub(r"\1 ", title)
+ return self.title_regexp.sub(r'\1 ', title)
def parse_index(self):
sections = []
@@ -88,43 +88,43 @@ class NRC(BasicNewsRecipe):
issues = json.loads(r.read())
if len(issues) > 0:
issue_date = datetime.datetime.strptime(
- issues[0]["published_at"], "%Y-%m-%dT%H:%M:%SZ"
+ issues[0]['published_at'], '%Y-%m-%dT%H:%M:%SZ'
)
- issue_url = self._monthly_list_url(issue_date, "%Y/%m/%d/")
- self.frontpage = issues[0]["frontpage"]
+ issue_url = self._monthly_list_url(issue_date, '%Y/%m/%d/')
+ self.frontpage = issues[0]['frontpage']
break
if issue_url is None:
return []
with closing(self.browser.open(Request(issue_url, None, headers))) as r:
edition = json.loads(r.read())
documents = {}
- for headline in edition["paperheadlines"]:
- item = headline["item"]
- documents[headline["document_id"]] = dict(
- url=item["full_url"],
- headline=self._clean_article_title(item["headline"])
+ for headline in edition['paperheadlines']:
+ item = headline['item']
+ documents[headline['document_id']] = dict(
+ url=item['full_url'],
+ headline=self._clean_article_title(item['headline'])
)
- for section in edition["sections"]:
+ for section in edition['sections']:
articles = []
- for doc in section["document_ids"]:
+ for doc in section['document_ids']:
if doc not in documents:
self.log.warn('Document not found:', doc)
continue
articles.append(
dict(
- title=documents[doc]["headline"], url=documents[doc]["url"]
+ title=documents[doc]['headline'], url=documents[doc]['url']
)
)
- sections.append((section["name"], articles))
+ sections.append((section['name'], articles))
return sections
def preprocess_html(self, soup):
for tag in soup():
if tag.name == 'img':
if tag.has_attr('data-src-medium'):
- tag['src'] = tag['data-src-medium'].split("|")[0]
+ tag['src'] = tag['data-src-medium'].split('|')[0]
elif tag.has_attr('data-src'):
- tag['src'] = tag['data-src'].split("|")[0]
+ tag['src'] = tag['data-src'].split('|')[0]
if tag['src'].startswith('//'):
tag['src'] = 'https:' + tag['src']
elif tag['src'].startswith('/'):
diff --git a/recipes/nrc_next.recipe b/recipes/nrc_next.recipe
index 42886190d2..bc6e73e5d3 100644
--- a/recipes/nrc_next.recipe
+++ b/recipes/nrc_next.recipe
@@ -62,7 +62,7 @@ class NRCNext(BasicNewsRecipe):
zfile = zipfile.ZipFile(BytesIO(epubraw), 'r')
zfile.extractall(self.output_dir)
namelist = zfile.namelist()
- emre = re.compile("<em(?:.*)>(.*)</em>")
+ emre = re.compile('<em(?:.*)>(.*)</em>')
subst = '\\1'
for name in namelist:
_, ext = os.path.splitext(name)
diff --git a/recipes/nspm.recipe b/recipes/nspm.recipe
index 2b89b3a604..f533f4e5fc 100644
--- a/recipes/nspm.recipe
+++ b/recipes/nspm.recipe
@@ -33,13 +33,13 @@ class Nspm(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'magazine'
masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
- extra_css = """ @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
+ extra_css = ''' @font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: "Times New Roman", serif1, serif}
.article_description{font-family: Arial, sans1, sans-serif}
img{margin-top:0.5em; margin-bottom: 0.7em; display: block}
.author{color: #990000; font-weight: bold}
- .author,.createdate{font-size: 0.9em} """
+ .author,.createdate{font-size: 0.9em} '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language, 'pretty_print': True
diff --git a/recipes/nspm_int.recipe b/recipes/nspm_int.recipe
index a562a05628..9309c88bfb 100644
--- a/recipes/nspm_int.recipe
+++ b/recipes/nspm_int.recipe
@@ -22,12 +22,12 @@ class Nspm_int(BasicNewsRecipe):
delay = 2
publication_type = 'magazine'
masthead_url = 'http://www.nspm.rs/templates/jsn_epic_pro/images/logol.jpg'
- extra_css = """
+ extra_css = '''
body{font-family: "Times New Roman", serif}
.article_description{font-family: Arial, sans-serif}
img{margin-top:0.5em; margin-bottom: 0.7em}
.author{color: #990000; font-weight: bold}
- .author,.createdate{font-size: 0.9em} """
+ .author,.createdate{font-size: 0.9em} '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language, 'linearize_tables': True
diff --git a/recipes/nyt_magazine.recipe b/recipes/nyt_magazine.recipe
index 52c4a38b0d..e548976fcc 100644
--- a/recipes/nyt_magazine.recipe
+++ b/recipes/nyt_magazine.recipe
@@ -66,7 +66,7 @@ class NytMag(BasicNewsRecipe):
if c.lower() == 'yes':
self.compress_news_images = True
- extra_css = """
+ extra_css = '''
.byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; }
@@ -74,7 +74,7 @@ class NytMag(BasicNewsRecipe):
.sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; }
- """
+ '''
@property
def nyt_parser(self):
diff --git a/recipes/nyt_tmag.recipe b/recipes/nyt_tmag.recipe
index b4b0a25a18..718a37adcb 100644
--- a/recipes/nyt_tmag.recipe
+++ b/recipes/nyt_tmag.recipe
@@ -66,7 +66,7 @@ class NytMag(BasicNewsRecipe):
if c.lower() == 'yes':
self.compress_news_images = True
- extra_css = """
+ extra_css = '''
.byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; }
@@ -74,7 +74,7 @@ class NytMag(BasicNewsRecipe):
.sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; }
- """
+ '''
@property
def nyt_parser(self):
diff --git a/recipes/nytfeeds.recipe b/recipes/nytfeeds.recipe
index bf05758c1e..136e707ec1 100644
--- a/recipes/nytfeeds.recipe
+++ b/recipes/nytfeeds.recipe
@@ -92,7 +92,7 @@ class NytFeeds(BasicNewsRecipe):
if c.lower() == 'yes':
self.compress_news_images = True
- extra_css = """
+ extra_css = '''
.byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; }
@@ -100,7 +100,7 @@ class NytFeeds(BasicNewsRecipe):
.sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; }
- """
+ '''
@property
def nyt_parser(self):
diff --git a/recipes/nytimes.recipe b/recipes/nytimes.recipe
index fb42463da6..7d81bb0d15 100644
--- a/recipes/nytimes.recipe
+++ b/recipes/nytimes.recipe
@@ -87,7 +87,7 @@ class NewYorkTimes(BasicNewsRecipe):
is_web_edition = True
oldest_web_edition_article = 7 # days
- extra_css = """
+ extra_css = '''
.byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; }
@@ -95,7 +95,7 @@ class NewYorkTimes(BasicNewsRecipe):
.sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; }
- """
+ '''
@property
def nyt_parser(self):
diff --git a/recipes/nytimes_sports.recipe b/recipes/nytimes_sports.recipe
index 8086ca729a..2e752483c6 100644
--- a/recipes/nytimes_sports.recipe
+++ b/recipes/nytimes_sports.recipe
@@ -6,7 +6,7 @@ from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = 'zotzo'
__docformat__ = 'restructuredtext en'
-"""
+'''
http://fifthdown.blogs.nytimes.com/
http://offthedribble.blogs.nytimes.com/
http://thequad.blogs.nytimes.com/
@@ -16,7 +16,7 @@ http://bats.blogs.nytimes.com/
http://straightsets.blogs.nytimes.com/
http://formulaone.blogs.nytimes.com/
http://onpar.blogs.nytimes.com/
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe
diff --git a/recipes/nytimes_sub.recipe b/recipes/nytimes_sub.recipe
index d069d1545d..ef0dd2157d 100644
--- a/recipes/nytimes_sub.recipe
+++ b/recipes/nytimes_sub.recipe
@@ -87,7 +87,7 @@ class NewYorkTimes(BasicNewsRecipe):
is_web_edition = False
oldest_web_edition_article = 7 # days
- extra_css = """
+ extra_css = '''
.byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; }
@@ -95,7 +95,7 @@ class NewYorkTimes(BasicNewsRecipe):
.sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; }
- """
+ '''
@property
def nyt_parser(self):
diff --git a/recipes/nytimes_tech.recipe b/recipes/nytimes_tech.recipe
index a7d9009f06..5d76fcd862 100644
--- a/recipes/nytimes_tech.recipe
+++ b/recipes/nytimes_tech.recipe
@@ -60,7 +60,7 @@ class NytTech(BasicNewsRecipe):
if c.lower() == 'yes':
self.compress_news_images = True
- extra_css = """
+ extra_css = '''
.byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; }
@@ -68,7 +68,7 @@ class NytTech(BasicNewsRecipe):
.sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; }
- """
+ '''
@property
def nyt_parser(self):
diff --git a/recipes/nytimesbook.recipe b/recipes/nytimesbook.recipe
index fb9e3558c2..df8eddc39a 100644
--- a/recipes/nytimesbook.recipe
+++ b/recipes/nytimesbook.recipe
@@ -24,7 +24,7 @@ class NewYorkTimesBookReview(BasicNewsRecipe):
ignore_duplicate_articles = {'title', 'url'}
encoding = 'utf-8'
- extra_css = """
+ extra_css = '''
.byl, .time { font-size:small; color:#202020; }
.cap { font-size:small; text-align:center; }
.cred { font-style:italic; font-size:small; }
@@ -32,7 +32,7 @@ class NewYorkTimesBookReview(BasicNewsRecipe):
.sc { font-variant: small-caps; }
.lbl { font-size:small; color:#404040; }
img { display:block; margin:0 auto; }
- """
+ '''
articles_are_obfuscated = use_wayback_machine
diff --git a/recipes/observatorul_cultural.recipe b/recipes/observatorul_cultural.recipe
index 4c91f8859e..304024a53c 100644
--- a/recipes/observatorul_cultural.recipe
+++ b/recipes/observatorul_cultural.recipe
@@ -32,7 +32,7 @@ class ObservatorulCultural(BasicNewsRecipe):
soup = self.index_to_soup(
'http://www.observatorcultural.ro/Arhiva*-archive.html')
issueTag = soup.find('a', href=re.compile(
- "observatorcultural.ro\\/Numarul"))
+ 'observatorcultural.ro\\/Numarul'))
issueURL = issueTag['href']
print(issueURL)
issueSoup = self.index_to_soup(issueURL)
diff --git a/recipes/observer_gb.recipe b/recipes/observer_gb.recipe
index dc49dc2d23..7a844061de 100644
--- a/recipes/observer_gb.recipe
+++ b/recipes/observer_gb.recipe
@@ -27,18 +27,18 @@ class Guardian(BasicNewsRecipe):
keep_only_tags = [
dict(name='div', attrs={
- 'id': ["content", "article_header", "main-article-info", ]}),
+ 'id': ['content', 'article_header', 'main-article-info', ]}),
]
remove_tags = [
dict(name='div', attrs={
- 'class': ["video-content", "videos-third-column"]}),
+ 'class': ['video-content', 'videos-third-column']}),
dict(name='div', attrs={
- 'id': ["article-toolbox", "subscribe-feeds", ]}),
+ 'id': ['article-toolbox', 'subscribe-feeds', ]}),
dict(name='div', attrs={
- 'class': ["promo-component bookshop-books-promo bookshop-books"]}),
- dict(name='ul', attrs={'class': ["pagination"]}),
- dict(name='ul', attrs={'id': ["content-actions"]}),
- dict(name='li', attrs={'id': ["product-image"]}),
+ 'class': ['promo-component bookshop-books-promo bookshop-books']}),
+ dict(name='ul', attrs={'class': ['pagination']}),
+ dict(name='ul', attrs={'id': ['content-actions']}),
+ dict(name='li', attrs={'id': ['product-image']}),
]
use_embedded_content = False
diff --git a/recipes/oc_register.recipe b/recipes/oc_register.recipe
index 5e489b404d..ec3179bab9 100644
--- a/recipes/oc_register.recipe
+++ b/recipes/oc_register.recipe
@@ -36,18 +36,18 @@ class OrangeCountyRegister(BasicNewsRecipe):
def parsePage(self, index):
if self.debugMessages is True:
- print("\n\nStarting " + self.feeds[index][0])
+ print('\n\nStarting ' + self.feeds[index][0])
articleList = []
soup = self.index_to_soup(self.feeds[index][1])
# Have this index page now.
# look for a.article-title
# If any, the description is
- for newsentry in soup.findAll("a", {"class": "article-title"}):
+ for newsentry in soup.findAll('a', {'class': 'article-title'}):
print('Next up:')
print(newsentry)
- title = newsentry["title"]
+ title = newsentry['title']
url = newsentry['href']
- print("Title: ")
+ print('Title: ')
print(title)
print('URL')
print(url)
@@ -66,19 +66,19 @@ class OrangeCountyRegister(BasicNewsRecipe):
def extract_readable_article(self, html, url):
cleanedHTML = super(OrangeCountyRegister,
self).extract_readable_article(html, url)
- print("Processing html for author")
+ print('Processing html for author')
# Find the attribs...
attribDict = self.htmlToAttribsDict(html)
- print("dict is type...")
+ print('dict is type...')
print(type(attribDict))
author = attribDict.get('Byline')
if author is not None:
# add author code after
- print("Adding author in meta")
+ print('Adding author in meta')
print(author)
cleanedHTML = cleanedHTML.replace(
- "",
- "\n
\n"
+ '',
+ '\n
\n'
)
else:
print('no author found')
@@ -92,7 +92,7 @@ class OrangeCountyRegister(BasicNewsRecipe):
def htmlToAttribsDict(self, rawHTML):
tokenStart = 'dataLayer.push({'
tokenEnd = '});'
- print("1")
+ print('1')
startJSON = rawHTML.find(tokenStart)
if (startJSON < 0):
return
@@ -101,13 +101,13 @@ class OrangeCountyRegister(BasicNewsRecipe):
if (endJSON < 0):
return
JSON = JSONBeginning[:endJSON + 1]
- JSONQuoted = JSON.replace("'", "\"")
+ JSONQuoted = JSON.replace("'", '"')
try:
metadata = json.loads(JSONQuoted)
pprint(metadata)
return metadata
except ValueError:
- print("Could not decode JSON:")
+ print('Could not decode JSON:')
print(JSONQuoted)
return None
diff --git a/recipes/omgubuntu.recipe b/recipes/omgubuntu.recipe
index 9242c5c683..0da184ad37 100644
--- a/recipes/omgubuntu.recipe
+++ b/recipes/omgubuntu.recipe
@@ -5,8 +5,8 @@ from calibre.web.feeds.news import BasicNewsRecipe
class OMGUbuntu(BasicNewsRecipe):
- title = u"Omg! Ubuntu!"
- description = u"Online news site covering Ubuntu activities. Recipe pulls articles from past 7 days."
+ title = u'Omg! Ubuntu!'
+ description = u'Online news site covering Ubuntu activities. Recipe pulls articles from past 7 days.'
language = 'en'
oldest_article = 7
max_articles_per_feed = 100
diff --git a/recipes/orient_21.recipe b/recipes/orient_21.recipe
index 6b06434e79..94ac39e363 100644
--- a/recipes/orient_21.recipe
+++ b/recipes/orient_21.recipe
@@ -32,9 +32,9 @@ class OrientXXIRecipe(BasicNewsRecipe):
'''
def default_cover(self, cover_file):
- """
+ '''
Crée une couverture personnalisée avec le logo
- """
+ '''
from qt.core import QColor, QFont, QImage, QPainter, QPen, QRect, Qt
from calibre.gui2 import ensure_app, load_builtin_fonts, pixmap_to_data
@@ -50,7 +50,7 @@ class OrientXXIRecipe(BasicNewsRecipe):
weekday = french_weekday[wkd]
month = french_month[today.month]
- date_str = f"{weekday} {today.day} {month} {today.year}"
+ date_str = f'{weekday} {today.day} {month} {today.year}'
edition = today.strftime('Édition de %Hh')
# Image de base
diff --git a/recipes/ottawa_citizen.recipe b/recipes/ottawa_citizen.recipe
index 7b0b152767..0154d55758 100644
--- a/recipes/ottawa_citizen.recipe
+++ b/recipes/ottawa_citizen.recipe
@@ -164,24 +164,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -262,10 +262,10 @@ class CanWestPaper(BasicNewsRecipe):
if url.startswith('/'):
url = self.url_prefix + url
if not url.startswith(self.url_prefix):
- print("Rejected " + url)
+ print('Rejected ' + url)
return
if url in self.url_list:
- print("Rejected dup " + url)
+ print('Rejected dup ' + url)
return
self.url_list.append(url)
title = self.tag_to_string(atag, False)
@@ -277,8 +277,8 @@ class CanWestPaper(BasicNewsRecipe):
return
dtag = adiv.find('div', 'content')
description = ''
- print("URL " + url)
- print("TITLE " + title)
+ print('URL ' + url)
+ print('TITLE ' + title)
if dtag is not None:
stag = dtag.span
if stag is not None:
@@ -286,18 +286,18 @@ class CanWestPaper(BasicNewsRecipe):
description = self.tag_to_string(stag, False)
else:
description = self.tag_to_string(dtag, False)
- print("DESCRIPTION: " + description)
+ print('DESCRIPTION: ' + description)
if key not in articles:
articles[key] = []
articles[key].append(dict(
title=title, url=url, date='', description=description, author='', content=''))
def parse_web_index(key, keyurl):
- print("Section: " + key + ': ' + self.url_prefix + keyurl)
+ print('Section: ' + key + ': ' + self.url_prefix + keyurl)
try:
soup = self.index_to_soup(self.url_prefix + keyurl)
except:
- print("Section: " + key + ' NOT FOUND')
+ print('Section: ' + key + ' NOT FOUND')
return
ans.append(key)
mainsoup = soup.find('div', 'bodywrapper')
diff --git a/recipes/outlook_india.recipe b/recipes/outlook_india.recipe
index 71f40a34c8..0f7128ee92 100644
--- a/recipes/outlook_india.recipe
+++ b/recipes/outlook_india.recipe
@@ -8,7 +8,7 @@ class outlook(BasicNewsRecipe):
__author__ = 'unkn0wn'
description = (
'Outlook covers the latest India news, analysis, business news and long-form stories on culture,'
- ' money market and personal finance. Read India\'s best online magazine.'
+ " money market and personal finance. Read India's best online magazine."
)
language = 'en_IN'
use_embedded_content = False
diff --git a/recipes/pagina12.recipe b/recipes/pagina12.recipe
index 54ab8ae358..0bff247ba1 100644
--- a/recipes/pagina12.recipe
+++ b/recipes/pagina12.recipe
@@ -33,7 +33,7 @@ class Pagina12(BasicNewsRecipe):
articles_are_obfuscated = True
temp_files = []
fetch_retries = 10
- extra_css = """
+ extra_css = '''
body{font-family: "Open Sans", sans-serif}
.article-date{font-size: small; margin-bottom: 1em;}
.article-title{font-size: x-large; font-weight: bold; display: block; margin-bottom: 1em; margin-top: 1em;}
@@ -43,7 +43,7 @@ class Pagina12(BasicNewsRecipe):
img{margin-top:1em; margin-bottom: 1em; display:block}
.article-text p:first-letter{display: inline; font-size: xx-large; font-weight: bold}
.article-prefix{font-family: "Archivo Narrow",Helvetica,sans-serif; font-size: small; text-transform: uppercase;}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
@@ -112,6 +112,6 @@ class Pagina12(BasicNewsRecipe):
self.temp_files.append(tfile)
result = tfile.name
except:
- self.info("Retrying download...")
+ self.info('Retrying download...')
count += 1
return result
diff --git a/recipes/parool.recipe b/recipes/parool.recipe
index fd674d1522..9fea4d9d8d 100644
--- a/recipes/parool.recipe
+++ b/recipes/parool.recipe
@@ -26,7 +26,7 @@ class Parool(BasicNewsRecipe):
dict(attrs={'data-element-id': ['article-element-authors']}),
dict(name=['script', 'noscript', 'style']),
]
- remove_attributes = ["class", "id", "name", "style"]
+ remove_attributes = ['class', 'id', 'name', 'style']
encoding = 'utf-8'
no_stylesheets = True
ignore_duplicate_articles = {'url'}
@@ -50,7 +50,7 @@ class Parool(BasicNewsRecipe):
teaser_label = self.tag_to_string(header.find('h4').find('span', attrs={'class': 'teaser__label'})).strip()
teaser_sublabel = self.tag_to_string(header.find('h4').find('span', attrs={'class': 'teaser__sublabel'})).strip()
teaser_title = self.tag_to_string(header.find('h3').find('span', attrs={'class': 'teaser__title__value--short'})).strip()
- ignore = { "dirkjan", "s1ngle", "pukkels", "hein de kort" }
+ ignore = { 'dirkjan', 's1ngle', 'pukkels', 'hein de kort' }
if teaser_label.lower() in ignore:
continue
parts = []
@@ -74,13 +74,13 @@ class Parool(BasicNewsRecipe):
if tag['src'][0] == '/':
tag['src'] = 'https://www.parool.nl' + tag['src']
for tag in soup():
- if tag.name == "picture":
- tag.replaceWith(tag.find("img"))
+ if tag.name == 'picture':
+ tag.replaceWith(tag.find('img'))
comic_articles = {
- "Alle strips van Dirkjan",
- "S1NGLE",
- "Pukkels",
- "Bekijk hier alle cartoons van Hein de Kort",
+ 'Alle strips van Dirkjan',
+ 'S1NGLE',
+ 'Pukkels',
+ 'Bekijk hier alle cartoons van Hein de Kort',
}
if self.tag_to_string(soup.find('h1')).strip() in comic_articles:
for node in soup.find('figure').find_next_siblings():
@@ -93,8 +93,8 @@ class Parool(BasicNewsRecipe):
'Accept': 'application/json, text/javascript, */*; q=0.01',
'DNT': '1',
}
- url = "https://login-api.e-pages.dk/v1/krant.parool.nl/folders"
+ url = 'https://login-api.e-pages.dk/v1/krant.parool.nl/folders'
with closing(self.browser.open(Request(url, None, headers))) as r:
folders = json.loads(r.read())
- return folders["objects"][0]["teaser_medium"]
+ return folders['objects'][0]['teaser_medium']
return None
diff --git a/recipes/pecat.recipe b/recipes/pecat.recipe
index 6426d36221..4d70704a99 100644
--- a/recipes/pecat.recipe
+++ b/recipes/pecat.recipe
@@ -25,11 +25,11 @@ class Pecat_rs(BasicNewsRecipe):
ignore_duplicate_articles = {'url'}
needs_subscription = 'optional'
publication_type = 'magazine'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans1,sans-serif}
img{display: block; margin-bottom: 1em; margin-top: 1em}
p{display: block; margin-bottom: 1em; margin-top: 1em}
- """
+ '''
conversion_options = {
'comment': description, 'tags': 'politika, Srbija', 'publisher': 'Pecat', 'language': language
diff --git a/recipes/people_daily.recipe b/recipes/people_daily.recipe
index 4ad18a436c..e20d070a83 100644
--- a/recipes/people_daily.recipe
+++ b/recipes/people_daily.recipe
@@ -89,11 +89,11 @@ class AdvancedUserRecipe1277129332(BasicNewsRecipe):
# dict(name='p'),
# ]
remove_tags = [
- dict(name='div', class_="channel cf")
+ dict(name='div', class_='channel cf')
]
- remove_tags_before = [dict(name='div', class_="layout rm_txt cf")]
- remove_tags_after = [dict(name='div', class_="edit cf")]
+ remove_tags_before = [dict(name='div', class_='layout rm_txt cf')]
+ remove_tags_after = [dict(name='div', class_='edit cf')]
def append_page(self, soup, appendtag, position):
pager = soup.find('img', attrs={'src': '/img/next_b.gif'})
@@ -135,6 +135,6 @@ class AdvancedUserRecipe1277129332(BasicNewsRecipe):
try:
br.open(cover)
except:
- self.log("\nCover unavailable: " + cover)
+ self.log('\nCover unavailable: ' + cover)
cover = None
return cover
diff --git a/recipes/pescanik.recipe b/recipes/pescanik.recipe
index 365d74724e..157bb4b7e2 100644
--- a/recipes/pescanik.recipe
+++ b/recipes/pescanik.recipe
@@ -24,11 +24,11 @@ class Pescanik(BasicNewsRecipe):
language = 'sr'
publication_type = 'newsportal'
masthead_url = 'http://pescanik.net/wp-content/uploads/2011/10/logo1.png'
- extra_css = """
+ extra_css = '''
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: Verdana,Arial,Tahoma,sans1,sans-serif}
#BlogTitle{font-size: xx-large; font-weight: bold}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/politiko_dk.recipe b/recipes/politiko_dk.recipe
index 5d5228c6c3..82a602eb8e 100644
--- a/recipes/politiko_dk.recipe
+++ b/recipes/politiko_dk.recipe
@@ -26,10 +26,10 @@ class PolitikoDK(BasicNewsRecipe):
auto_cleanup = False
keep_only_tags = [
- dict(name="h1", attrs={'class': 'article-headline'}),
- dict(name="p", attrs={'class': 'article-summary'}),
- dict(name="div", attrs={'class': 'article-date'}),
- dict(name="div", attrs={'class': 'article-content'}),
+ dict(name='h1', attrs={'class': 'article-headline'}),
+ dict(name='p', attrs={'class': 'article-summary'}),
+ dict(name='div', attrs={'class': 'article-date'}),
+ dict(name='div', attrs={'class': 'article-content'}),
]
# Feed are found here: http://www.b.dk/rss
diff --git a/recipes/portafolio.recipe b/recipes/portafolio.recipe
index ffdc530f85..df17cfb4dc 100644
--- a/recipes/portafolio.recipe
+++ b/recipes/portafolio.recipe
@@ -20,12 +20,12 @@ class AdvancedUserRecipe1311799898(BasicNewsRecipe):
masthead_url = 'http://www.portafolio.co/sites/portafolio.co/themes/portafolio_2011/logo.png'
publication_type = 'newspaper'
- extra_css = """
+ extra_css = '''
p{text-align: justify; font-size: 100%}
body{ text-align: left; font-size:100% }
h1{font-family: sans-serif; font-size:150%; font-weight:bold; text-align: justify; }
h3{font-family: sans-serif; font-size:100%; font-style: italic; text-align: justify; }
- """
+ '''
feeds = [(u'Negocios', u'http://www.portafolio.co/negocios/feed'),
(u'Economia', u'http://www.portafolio.co/economia/feed'),
diff --git a/recipes/pravda_por.recipe b/recipes/pravda_por.recipe
index 274576b4ee..31b88dec59 100644
--- a/recipes/pravda_por.recipe
+++ b/recipes/pravda_por.recipe
@@ -22,10 +22,10 @@ class Pravda_port(BasicNewsRecipe):
remove_empty_feeds = True
publication_type = 'newspaper'
masthead_url = 'http://port.pravda.ru/pix/logo.gif'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,sans-serif }
img{margin-bottom: 0.4em; display:block}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/presse_portal.recipe b/recipes/presse_portal.recipe
index eda9430236..e998e33e7c 100644
--- a/recipes/presse_portal.recipe
+++ b/recipes/presse_portal.recipe
@@ -24,7 +24,7 @@ class PressePortalDE(BasicNewsRecipe):
# add date to description so for dayly downloads you can find them easier
# ---- can be edit by user
description = description + ' fetched: ' + \
- datetime.now().strftime("%Y-%m-%d") # %H:%M:%S")
+ datetime.now().strftime('%Y-%m-%d') # %H:%M:%S")
# Who published the content?
publisher = u'Presseportal.de'
# What is the content of?
diff --git a/recipes/private_eye.recipe b/recipes/private_eye.recipe
index 5881eb5b90..4f0076e06b 100644
--- a/recipes/private_eye.recipe
+++ b/recipes/private_eye.recipe
@@ -77,10 +77,10 @@ class PrivateEyeRecipe(BasicNewsRecipe):
try:
day, month, year = next_issue_text.split(' ')
day = ''.join(c for c in day if c.isdigit())
- pub_date = datetime.strptime(" ".join((day, month, year)), "%d %B %Y") - timedelta(12)
+ pub_date = datetime.strptime(' '.join((day, month, year)), '%d %B %Y') - timedelta(12)
self.log('pub-date:', pub_date)
- self.conversion_options.update({'pubdate': datetime.strftime(pub_date, "%d %B %Y").lstrip("0")})
- title = self.title + " " + datetime.strftime(pub_date, "%Y-%m-%d")
+ self.conversion_options.update({'pubdate': datetime.strftime(pub_date, '%d %B %Y').lstrip('0')})
+ title = self.title + ' ' + datetime.strftime(pub_date, '%Y-%m-%d')
self.conversion_options.update({'title': title})
self.conversion_options.update({'title_sort': title})
except (TypeError, ValueError):
@@ -124,13 +124,13 @@ class PrivateEyeRecipe(BasicNewsRecipe):
def preprocess_html(self, soup):
        # Remove <a> tag link to crossword image
for tag in soup.findAll('a', {'href': re.compile(r'/pictures/crossword/')}):
- self.log("Removing link to crossword image...")
+ self.log('Removing link to crossword image...')
tag.unwrap()
# Remove align tag in crossword image (so float right works)
for tag in soup.findAll('img', {'src': re.compile(r'/pictures/crossword/')}):
- if "align" in tag.attrs:
- self.log("Removing crossword image align attribute...")
+ if 'align' in tag.attrs:
+ self.log('Removing crossword image align attribute...')
del tag.attrs['align']
return soup
@@ -138,10 +138,10 @@ class PrivateEyeRecipe(BasicNewsRecipe):
# We remove vast swathes of HTML which is not part of the articles.
# Remove sibling content
remove_tags_before = [
- {'name': 'div', 'class': "article"},
- {'name': 'div', 'id': "page"},
- {'name': 'div', 'id': "page-wide"},
- {'name': 'div', 'id': "content"},
+ {'name': 'div', 'class': 'article'},
+ {'name': 'div', 'id': 'page'},
+ {'name': 'div', 'id': 'page-wide'},
+ {'name': 'div', 'id': 'content'},
{'name': 'a', ' attrs': {'href': 'https://shop.private-eye.co.uk'}},
]
remove_tags_after = remove_tags_before.copy()
diff --git a/recipes/pro_physik.recipe b/recipes/pro_physik.recipe
index 8c945069fb..869f5f0462 100644
--- a/recipes/pro_physik.recipe
+++ b/recipes/pro_physik.recipe
@@ -45,7 +45,7 @@ class AdvancedUserRecipe1303841067(BasicNewsRecipe):
]
remove_tags = [
- dict(name='ul', attrs={'class':["wj-share-buttons"]}), #Block social media
+ dict(name='ul', attrs={'class':['wj-share-buttons']}), #Block social media
]
feeds = [
diff --git a/recipes/prospectmaguk_free.recipe b/recipes/prospectmaguk_free.recipe
index c37513a90d..b0018324df 100644
--- a/recipes/prospectmaguk_free.recipe
+++ b/recipes/prospectmaguk_free.recipe
@@ -8,47 +8,47 @@ from urllib.parse import urljoin
from calibre.web.feeds.news import BasicNewsRecipe, prefixed_classes
-_issue_url = ""
+_issue_url = ''
class ProspectMagazineUKFree(BasicNewsRecipe):
- title = "Prospect Magazine (Free)"
- __author__ = "ping"
+ title = 'Prospect Magazine (Free)'
+ __author__ = 'ping'
description = (
- "Prospect is Britain’s leading current affairs monthly magazine. "
- "It is an independent and eclectic forum for writing and thinking—in "
- "print and online. Published every month with two double issues in "
- "the summer and winter, it spans politics, science, foreign affairs, "
- "economics, the environment, philosophy and the arts."
+ 'Prospect is Britain’s leading current affairs monthly magazine. '
+ 'It is an independent and eclectic forum for writing and thinking—in '
+ 'print and online. Published every month with two double issues in '
+ 'the summer and winter, it spans politics, science, foreign affairs, '
+ 'economics, the environment, philosophy and the arts.'
)
- language = "en_GB"
- category = "news, UK"
- publication_type = "magazine"
- masthead_url = "https://media.prospectmagazine.co.uk/prod/images/gm_grid_thumbnail/358ffc17208c-f4c3cddcdeda-prospect-masthead.png"
- encoding = "utf-8"
+ language = 'en_GB'
+ category = 'news, UK'
+ publication_type = 'magazine'
+ masthead_url = 'https://media.prospectmagazine.co.uk/prod/images/gm_grid_thumbnail/358ffc17208c-f4c3cddcdeda-prospect-masthead.png'
+ encoding = 'utf-8'
remove_javascript = True
no_stylesheets = True
- ignore_duplicate_articles = {"url"}
- INDEX = "https://www.prospectmagazine.co.uk/issues"
+ ignore_duplicate_articles = {'url'}
+ INDEX = 'https://www.prospectmagazine.co.uk/issues'
- keep_only_tags = [dict(class_="prop-book-article-panel_main")]
+ keep_only_tags = [dict(class_='prop-book-article-panel_main')]
remove_tags = [
dict(
class_=[
- "prop-book-review-header-wrapper_magazine",
- "prop-mobile-social-share_header",
- "prop-magazine-link-block",
- "pros-article-body__img-credit",
- "pros-article-topics__wrapper",
- "pros-article-author__image-wrapper",
- "prop-book-review-promo_details-buy-mobile",
+ 'prop-book-review-header-wrapper_magazine',
+ 'prop-mobile-social-share_header',
+ 'prop-magazine-link-block',
+ 'pros-article-body__img-credit',
+ 'pros-article-topics__wrapper',
+ 'pros-article-author__image-wrapper',
+ 'prop-book-review-promo_details-buy-mobile',
]
),
- dict(id=["disqus_thread", "newsletter_wrapper"]),
- prefixed_classes("dfp-slot-"),
+ dict(id=['disqus_thread', 'newsletter_wrapper']),
+ prefixed_classes('dfp-slot-'),
]
- extra_css = """
+ extra_css = '''
h1 { font-size: 1.8rem; margin-bottom: 0.4rem; }
.prop-book-review-header-wrapper_standfirst { font-size: 1.2rem; font-style: italic; font-weight: normal; margin-bottom: 0.5rem; }
.prop-book-review-header-wrapper_details { margin-top: 1rem; margin-bottom: 1rem; }
@@ -62,23 +62,23 @@ class ProspectMagazineUKFree(BasicNewsRecipe):
.pullquote, blockquote { text-align: center; margin-left: 0; margin-bottom: 0.4rem; font-size: 1.25rem; }
.prop-book-review-article_author { margin: 1.5rem 0; font-style: italic; }
.prop-book-review-promo { margin-bottom: 1rem; }
- """
+ '''
def preprocess_html(self, soup):
# re-position lede image
- lede_img = soup.find("img", class_="prop-book-review-header-wrapper_image")
- meta = soup.find("div", class_="prop-book-review-header-wrapper_details")
+ lede_img = soup.find('img', class_='prop-book-review-header-wrapper_image')
+ meta = soup.find('div', class_='prop-book-review-header-wrapper_details')
if lede_img and meta:
lede_img = lede_img.extract()
meta.insert_after(lede_img)
- for img in soup.find_all("img", attrs={"data-src": True}):
- img["src"] = img["data-src"]
- del img["data-src"]
+ for img in soup.find_all('img', attrs={'data-src': True}):
+ img['src'] = img['data-src']
+ del img['data-src']
- for byline_link in soup.find_all("a", attrs={"data-author-name": True}):
+ for byline_link in soup.find_all('a', attrs={'data-author-name': True}):
byline_link.unwrap()
- for author_link in soup.find_all("a", class_="pros-article-author"):
+ for author_link in soup.find_all('a', class_='pros-article-author'):
author_link.unwrap()
return soup
@@ -87,39 +87,39 @@ class ProspectMagazineUKFree(BasicNewsRecipe):
if not _issue_url:
issues_soup = self.index_to_soup(self.INDEX)
curr_issue_a_ele = issues_soup.find(
- "a", class_="pros-collection-landing__item"
+ 'a', class_='pros-collection-landing__item'
)
- curr_issue_url = urljoin(self.INDEX, curr_issue_a_ele["href"])
+ curr_issue_url = urljoin(self.INDEX, curr_issue_a_ele['href'])
else:
curr_issue_url = _issue_url
soup = self.index_to_soup(curr_issue_url)
issue_name = (
- self.tag_to_string(soup.find(class_="magazine-lhc__issue-name"))
- .replace(" issue", "")
+ self.tag_to_string(soup.find(class_='magazine-lhc__issue-name'))
+ .replace(' issue', '')
.strip()
)
- self.timefmt = f" [{issue_name}]"
+ self.timefmt = f' [{issue_name}]'
- self.cover_url = soup.find("img", class_="magazine-lhc__cover-image")[
- "data-src"
- ].replace("portrait_small_fit", "portrait_large_fit")
+ self.cover_url = soup.find('img', class_='magazine-lhc__cover-image')[
+ 'data-src'
+ ].replace('portrait_small_fit', 'portrait_large_fit')
articles = OrderedDict()
- sections = soup.find_all("div", class_="pro-magazine-section")
+ sections = soup.find_all('div', class_='pro-magazine-section')
for section in sections:
section_name = self.tag_to_string(
- section.find(class_="pro-magazine-section__name")
+ section.find(class_='pro-magazine-section__name')
)
for sect_article in section.find_all(
- class_="pro-magazine-section__article"
+ class_='pro-magazine-section__article'
):
articles.setdefault(section_name, []).append(
{
- "url": urljoin(self.INDEX, sect_article.find("a")["href"]),
- "title": self.tag_to_string(
+ 'url': urljoin(self.INDEX, sect_article.find('a')['href']),
+ 'title': self.tag_to_string(
sect_article.find(
- class_="pro-magazine-section__article-headline"
+ class_='pro-magazine-section__article-headline'
)
),
}
diff --git a/recipes/psych.recipe b/recipes/psych.recipe
index b5353a4f1e..b07ed764d5 100644
--- a/recipes/psych.recipe
+++ b/recipes/psych.recipe
@@ -5,10 +5,10 @@ from calibre.web.feeds.recipes import BasicNewsRecipe
def absurl(url):
- if url.startswith("//"):
- return "https:" + url
- if url.startswith("/"):
- return "https://www.psychologytoday.com" + url
+ if url.startswith('//'):
+ return 'https:' + url
+ if url.startswith('/'):
+ return 'https://www.psychologytoday.com' + url
return url
diff --git a/recipes/quanta_magazine.recipe b/recipes/quanta_magazine.recipe
index 404b7d5a50..aa9d7567fd 100644
--- a/recipes/quanta_magazine.recipe
+++ b/recipes/quanta_magazine.recipe
@@ -7,25 +7,25 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Quanta(BasicNewsRecipe):
- title = "Quanta Magazine"
+ title = 'Quanta Magazine'
__author__ = 'lui1'
- description = "Articles from the magazine. Please set to download weekly."
+ description = 'Articles from the magazine. Please set to download weekly.'
oldest_article = 7
max_articles_per_feed = 100
language = 'en'
encoding = 'UTF-8'
- publication_type = "blog"
- cover_url = "https://d2r55xnwy6nx47.cloudfront.net/uploads/2017/05/logo.png"
+ publication_type = 'blog'
+ cover_url = 'https://d2r55xnwy6nx47.cloudfront.net/uploads/2017/05/logo.png'
feeds = [
('Articles', 'https://api.quantamagazine.org/feed/'),
]
keep_only_tags = [
- dict(name="div", attrs={"id": "postBody"}),
+ dict(name='div', attrs={'id': 'postBody'}),
]
remove_tags = [
- dict(name="div", attrs={"class": "post__sidebar__content"}),
+ dict(name='div', attrs={'class': 'post__sidebar__content'}),
]
diff --git a/recipes/queueacmorg.recipe b/recipes/queueacmorg.recipe
index e58878f435..deb7053020 100644
--- a/recipes/queueacmorg.recipe
+++ b/recipes/queueacmorg.recipe
@@ -11,17 +11,17 @@ ACM Queue Magazine
class QueueAcmOrg(BasicNewsRecipe):
- title = "ACM Queue Magazine"
+ title = 'ACM Queue Magazine'
__author__ = 'yodha8'
- description = "Queue is the ACM magazine for practicing software engineers. Published once every 2 months. Example: Jan-Feb."
+ description = 'Queue is the ACM magazine for practicing software engineers. Published once every 2 months. Example: Jan-Feb.'
oldest_article = 60
max_articles_per_feed = 50
auto_cleanup = True
language = 'en'
- cover_url = "https://queue.acm.org/img/acmqueue_logo.gif"
+ cover_url = 'https://queue.acm.org/img/acmqueue_logo.gif'
feeds = [
- ("All Queue Content", "https://queue.acm.org/rss/feeds/queuecontent.xml"),
+ ('All Queue Content', 'https://queue.acm.org/rss/feeds/queuecontent.xml'),
]
def get_cover_url(self):
diff --git a/recipes/readitlater.recipe b/recipes/readitlater.recipe
index 43c8c8211c..5a45b93596 100644
--- a/recipes/readitlater.recipe
+++ b/recipes/readitlater.recipe
@@ -1,6 +1,6 @@
-"""
+'''
Pocket Calibre Recipe v1.5
-"""
+'''
import json
import operator
@@ -69,22 +69,22 @@ class Pocket(BasicNewsRecipe):
br['password'] = self.password
br.submit()
else:
- self.user_error("This Recipe requires authentication")
+ self.user_error('This Recipe requires authentication')
return br
def get_auth_uri(self):
- """Quick function to return the authentication part of the url"""
- uri = ""
+ '''Quick function to return the authentication part of the url'''
+ uri = ''
uri = u'{0}&apikey={1!s}'.format(uri, self.apikey)
if self.username is None or self.password is None:
- self.user_error("Username or password is blank.")
+ self.user_error('Username or password is blank.')
else:
uri = u'{0}&username={1!s}'.format(uri, self.username)
uri = u'{0}&password={1!s}'.format(uri, self.password)
return uri
def get_pull_articles_uri(self):
- uri = ""
+ uri = ''
uri = u'{0}&state={1}'.format(uri, u'unread')
uri = u'{0}&contentType={1}'.format(uri, u'article')
uri = u'{0}&sort={1}'.format(uri, self.sort_method)
@@ -95,7 +95,7 @@ class Pocket(BasicNewsRecipe):
def parse_index(self):
pocket_feed = []
- fetch_url = u"{0}?{1}{2}".format(
+ fetch_url = u'{0}?{1}{2}'.format(
self.read_api_url,
self.get_auth_uri(),
self.get_pull_articles_uri()
@@ -106,7 +106,7 @@ class Pocket(BasicNewsRecipe):
if len(pocket_feed) < self.minimum_articles:
self.mark_as_read_after_dl = False
self.user_error(
- "Only {0} articles retrieved, minimum_articles not reached".format(len(pocket_feed)))
+ 'Only {0} articles retrieved, minimum_articles not reached'.format(len(pocket_feed)))
for pocket_article in pocket_feed.items():
self.articles.append({
@@ -119,7 +119,7 @@ class Pocket(BasicNewsRecipe):
'sort': pocket_article[1]['sort_id']
})
self.articles = sorted(self.articles, key=operator.itemgetter('sort'))
- return [("My Pocket Articles for {0}".format(strftime('[%I:%M %p]')), self.articles)]
+ return [('My Pocket Articles for {0}'.format(strftime('[%I:%M %p]')), self.articles)]
def mark_as_read(self, mark_list):
actions_list = []
diff --git a/recipes/real_clear.recipe b/recipes/real_clear.recipe
index a2031e8b7b..4a36bf6b68 100644
--- a/recipes/real_clear.recipe
+++ b/recipes/real_clear.recipe
@@ -35,17 +35,17 @@ class RealClear(BasicNewsRecipe):
# Numeric parameter is type, controls whether we look for
feedsets = [
- ["Politics", "http://www.realclearpolitics.com/index.xml", 0],
- ["Policy", "http://www.realclearpolicy.com/index.xml", 0],
- ["Science", "http://www.realclearscience.com/index.xml", 0],
- ["Tech", "http://www.realcleartechnology.com/index.xml", 0],
+ ['Politics', 'http://www.realclearpolitics.com/index.xml', 0],
+ ['Policy', 'http://www.realclearpolicy.com/index.xml', 0],
+ ['Science', 'http://www.realclearscience.com/index.xml', 0],
+ ['Tech', 'http://www.realcleartechnology.com/index.xml', 0],
# The feedburner is essentially the same as the top feed, politics.
# ["Politics Burner", "http://feeds.feedburner.com/realclearpolitics/qlMj", 1],
# ["Commentary", "http://feeds.feedburner.com/Realclearpolitics-Articles", 1],
- ["Markets Home", "http://www.realclearmarkets.com/index.xml", 0],
- ["Markets", "http://www.realclearmarkets.com/articles/index.xml", 0],
- ["World", "http://www.realclearworld.com/index.xml", 0],
- ["World Blog", "http://www.realclearworld.com/blog/index.xml", 2]
+ ['Markets Home', 'http://www.realclearmarkets.com/index.xml', 0],
+ ['Markets', 'http://www.realclearmarkets.com/articles/index.xml', 0],
+ ['World', 'http://www.realclearworld.com/index.xml', 0],
+ ['World Blog', 'http://www.realclearworld.com/blog/index.xml', 2]
]
# Hints to extractPrintURL.
# First column is the URL snippet. Then the string to search for as text,
@@ -53,13 +53,13 @@ class RealClear(BasicNewsRecipe):
# drill down.
phUrlSnip, phLinkText, phMainSearch, phHrefSearch = range(4)
- printhints = [["realclear", "", '', 'printpage'],
- ["billoreilly.com", "Print this entry", 'a', ''],
- ["billoreilly.com", "Print This Article", 'a', ''],
- ["politico.com", "Print",
+ printhints = [['realclear', '', '', 'printpage'],
+ ['billoreilly.com', 'Print this entry', 'a', ''],
+ ['billoreilly.com', 'Print This Article', 'a', ''],
+ ['politico.com', 'Print',
'a', 'share-print'],
- ["nationalreview.com", ">Print<", 'a', ''],
- ["reason.com", "", 'a', 'printer']
+ ['nationalreview.com', '>Print<', 'a', ''],
+ ['reason.com', '', 'a', 'printer']
# The following are not supported due to JavaScripting, and would require obfuscated_article to handle
# forbes,
# usatoday - just prints with all current crap anyhow
@@ -82,12 +82,12 @@ class RealClear(BasicNewsRecipe):
def extractPrintURL(self, pageURL):
tagURL = pageURL
baseParse = urlparse(pageURL)
- baseURL = baseParse[0] + "://" + baseParse[1]
+ baseURL = baseParse[0] + '://' + baseParse[1]
hintsCount = len(self.printhints)
for x in range(0, hintsCount):
if pageURL.find(self.printhints[x][0]) == -1:
continue
- print("Trying " + self.printhints[x][0])
+ print('Trying ' + self.printhints[x][0])
# Only retrieve the soup if we have a match to check for the
# printed article with.
soup = self.index_to_soup(pageURL)
@@ -96,51 +96,51 @@ class RealClear(BasicNewsRecipe):
if len(self.printhints[x][self.phHrefSearch]) > 0 and len(self.printhints[x][self.phLinkText]) == 0:
# e.g. RealClear
if self.debugMessages is True:
- print("Search by href: " +
+ print('Search by href: ' +
self.printhints[x][self.phHrefSearch])
printFind = soup.find(href=re.compile(
self.printhints[x][self.phHrefSearch]))
elif len(self.printhints[x][3]) > 0 and len(self.printhints[x][1]) == 0:
if self.debugMessages is True:
- print("Search 1: " +
- self.printhints[x][2] + " Attributes: ")
+ print('Search 1: ' +
+ self.printhints[x][2] + ' Attributes: ')
print(self.printhints[x][3])
printFind = soup.find(
self.printhints[x][2], attrs=self.printhints[x][3])
elif len(self.printhints[x][3]) > 0:
if self.debugMessages is True:
- print("search2")
+ print('search2')
printFind = soup.find(self.printhints[x][2], attrs=self.printhints[
x][3], text=self.printhints[x][1])
else:
if self.debugMessages is True:
print(
- "Default Search: " + self.printhints[x][2] + " Text: " + self.printhints[x][1])
+ 'Default Search: ' + self.printhints[x][2] + ' Text: ' + self.printhints[x][1])
printFind = soup.find(
self.printhints[x][2], text=self.printhints[x][1])
if printFind is None:
if self.debugMessages is True:
- print("Not Found")
+ print('Not Found')
# print(soup)
- print("end soup\n\n")
+ print('end soup\n\n')
continue
print(printFind)
if isinstance(printFind, NavigableString) is False:
if printFind['href'] is not None:
- print("Check " + printFind['href'] +
- " for base of " + baseURL)
- if printFind['href'].find("http") != 0:
+ print('Check ' + printFind['href'] +
+ ' for base of ' + baseURL)
+ if printFind['href'].find('http') != 0:
return baseURL + printFind['href']
return printFind['href']
tag = printFind.parent
print(tag)
if tag.get('href', None) is None:
if self.debugMessages is True:
- print("Not in parent, trying skip-up")
+ print('Not in parent, trying skip-up')
if tag.parent['href'] is None:
if self.debugMessages is True:
- print("Not in skip either, aborting")
+ print('Not in skip either, aborting')
continue
return tag.parent['href']
return tag['href']
@@ -148,45 +148,45 @@ class RealClear(BasicNewsRecipe):
def get_browser(self):
if self.debugMessages is True:
- print("In get_browser")
+ print('In get_browser')
br = BasicNewsRecipe.get_browser(self)
return br
def parseRSS(self, index):
if self.debugMessages is True:
- print("\n\nStarting " + self.feedsets[index][0])
+ print('\n\nStarting ' + self.feedsets[index][0])
articleList = []
soup = self.index_to_soup(self.feedsets[index][1])
- for div in soup.findAll("item"):
- title = div.find("title").contents[0]
- urlEl = div.find("originalLink")
+ for div in soup.findAll('item'):
+ title = div.find('title').contents[0]
+ urlEl = div.find('originalLink')
if urlEl is None or len(urlEl.contents) == 0:
- urlEl = div.find("originallink")
+ urlEl = div.find('originallink')
if urlEl is None or len(urlEl.contents) == 0:
- urlEl = div.find("link")
+ urlEl = div.find('link')
if urlEl is None or len(urlEl.contents) == 0:
- urlEl = div.find("guid")
+ urlEl = div.find('guid')
if urlEl is None or title is None or len(urlEl.contents) == 0:
- print("Error in feed " + self.feedsets[index][0])
+ print('Error in feed ' + self.feedsets[index][0])
print(div)
continue
print(title)
print(urlEl)
- url = urlEl.contents[0].encode("utf-8")
- description = div.find("description")
+ url = urlEl.contents[0].encode('utf-8')
+ description = div.find('description')
if description is not None and description.contents is not None and len(description.contents) > 0:
description = description.contents[0]
else:
- description = "None"
- pubDateEl = div.find("pubDate")
+ description = 'None'
+ pubDateEl = div.find('pubDate')
if pubDateEl is None:
- pubDateEl = div.find("pubdate")
+ pubDateEl = div.find('pubdate')
if pubDateEl is None:
pubDate = time.strftime('%a, %d %b')
else:
pubDate = pubDateEl.contents[0]
if self.debugMessages is True:
- print("Article")
+ print('Article')
print(title)
print(description)
print(pubDate)
diff --git a/recipes/regina_leader_post.recipe b/recipes/regina_leader_post.recipe
index d3ff8ef484..d9b37fc9bc 100644
--- a/recipes/regina_leader_post.recipe
+++ b/recipes/regina_leader_post.recipe
@@ -123,24 +123,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('&#x2019;', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -180,14 +180,14 @@ class CanWestPaper(BasicNewsRecipe):
ans = ['News']
# Find each instance of class="sectiontitle", class="featurecontent"
- for divtag in soup.findAll('div', attrs={'class': ["section_title02", "featurecontent"]}):
+ for divtag in soup.findAll('div', attrs={'class': ['section_title02', 'featurecontent']}):
if 'section_title' in ''.join(divtag['class']):
# div contains section title
if not divtag.h3:
continue
key = self.tag_to_string(divtag.h3, False)
ans.append(key)
- self.log("Section name %s" % key)
+ self.log('Section name %s' % key)
continue
# div contains article data
h1tag = divtag.find('h1')
diff --git a/recipes/respekt_magazine.recipe b/recipes/respekt_magazine.recipe
index aa5f76e08d..ecb9cae42b 100644
--- a/recipes/respekt_magazine.recipe
+++ b/recipes/respekt_magazine.recipe
@@ -72,8 +72,8 @@ class respektRecipe(BasicNewsRecipe):
# So that remove_tags_before works for this section
def preprocess_raw_html(self, raw_html, url):
root = lxml.html.fromstring(raw_html)
- if root.xpath("//title")[0].text == (u"Respekt • Despekt • RESPEKT"):
- raw_html = re.sub("h2","h1",raw_html)
+ if root.xpath('//title')[0].text == (u'Respekt • Despekt • RESPEKT'):
+ raw_html = re.sub('h2','h1',raw_html)
return raw_html
def parse_index(self):
@@ -82,17 +82,17 @@ class respektRecipe(BasicNewsRecipe):
current_edition_url = root1.xpath("//div[@class='heroissue']/a")[0].items()[0][1]
raw2 = self.index_to_soup('https://www.respekt.cz/' + current_edition_url, raw=True)
root2 = lxml.html.fromstring(raw2)
- self.cover_url = root2.xpath("//i[contains(@class, 'heroissue-cover')]")[0].get("data-src")
+ self.cover_url = root2.xpath("//i[contains(@class, 'heroissue-cover')]")[0].get('data-src')
# Fetch date
date_text = root2.xpath("//time[@class='heroissue-date']")[0].text.split(',')[1]
- s = date_text.split(" ")
+ s = date_text.split(' ')
# Are the dates of the issue in the same month and year?
if len(s) == 4 or len(s) == 7:
- date = "/".join([s[1].split(".")[0],s[2].split(".")[0],s[-1]])
+ date = '/'.join([s[1].split('.')[0],s[2].split('.')[0],s[-1]])
elif len(s) == 8:
- date = "/".join([s[1].split(".")[0],s[2].split(".")[0],s[3]])
+ date = '/'.join([s[1].split('.')[0],s[2].split('.')[0],s[3]])
self.conversion_options = {'pubdate':date}
- self.title = "Respekt magazine #" + "/".join(current_edition_url.split("/")[-1:-3:-1])
+ self.title = 'Respekt magazine #' + '/'.join(current_edition_url.split('/')[-1:-3:-1])
ans = []
for section in root2.xpath("//div[@class='col-md-6']/div[@class='issuedetail-categorized-sectionname']"):
section_name = section.text
@@ -100,7 +100,7 @@ class respektRecipe(BasicNewsRecipe):
article = section.getnext()
while hasattr(article, 'text') and not article.text.strip():
title = article.xpath("span[@class='issuedetail-categorized-title']")[0].text
- url = respekt_url + article.xpath("@href")[0]
+ url = respekt_url + article.xpath('@href')[0]
articles.append({'title':title,'url':url})
article = article.getnext()
ans.append((section_name,articles))
@@ -113,24 +113,24 @@ class respektRecipe(BasicNewsRecipe):
raw = u''.join(type(u'')(a) for a in soup.contents)
root = lxml.html.fromstring(raw)
# Fix Letem světem
- if "Letem sv" in root.xpath("//title")[0].text:
- p = root.xpath("//p")
+ if 'Letem sv' in root.xpath('//title')[0].text:
+ p = root.xpath('//p')
for par in p[:]:
next = par.getnext()
if par.getchildren():
child = par.getchildren()[0]
- if hasattr(next,"tag") and next.tag == "h2" and hasattr(child,"tag") and child.tag == "strong":
+ if hasattr(next,'tag') and next.tag == 'h2' and hasattr(child,'tag') and child.tag == 'strong':
text = child.text_content()
if next.text:
- next.text = next.text + u" • " + text
+ next.text = next.text + u' • ' + text
else:
if next.getchildren():
next_child = next.getchildren()[0]
- next_child.text = next_child.text + u" • " + text
+ next_child.text = next_child.text + u' • ' + text
par.getparent().remove(par)
# Insert text length
text = root.xpath("//div[@id='postcontent']")[0]
- article_length = u" • " + str(len(text.text_content().split(' '))) + ' slov'
+ article_length = u' • ' + str(len(text.text_content().split(' '))) + ' slov'
try:
aut = root.xpath("//div[@class='authorship-names']")[0]
if aut.getchildren() and aut.getchildren()[0].tag == 'a':
@@ -149,11 +149,11 @@ class respektRecipe(BasicNewsRecipe):
except:
pass
# Make images visible
- pictures = root.xpath("//picture")
+ pictures = root.xpath('//picture')
for picture in pictures:
- image = picture.xpath("//source")[0]
- image_link = [a for a in image.get('srcset').split(' ') if a[:4] == "http"][-1]
- e=E.img({"src":image_link})
+ image = picture.xpath('//source')[0]
+ image_link = [a for a in image.get('srcset').split(' ') if a[:4] == 'http'][-1]
+ e=E.img({'src':image_link})
picture.getparent().replace(picture,e)
# Properly indent
paragraphs = root.xpath('//p')
@@ -163,11 +163,11 @@ class respektRecipe(BasicNewsRecipe):
prev = par.getprevious()
# Do not indent after headings
if hasattr(prev,'tag') and prev.tag not in ['h2', 'h3']:
- par.attrib['class']="indent_first_line"
+ par.attrib['class']='indent_first_line'
# Fix subtitle for Téma
try:
o = root.xpath("//p[@class='post-perex']")[0]
- e = E.h2({"class":"post-subtitle"})
+ e = E.h2({'class':'post-subtitle'})
e.text = o.text
o.getparent().replace(o,e)
except:
diff --git a/recipes/reuters.recipe b/recipes/reuters.recipe
index 50f260a944..b3ecf2e42c 100644
--- a/recipes/reuters.recipe
+++ b/recipes/reuters.recipe
@@ -33,11 +33,11 @@ class Reuters(BasicNewsRecipe):
resolve_internal_links = True
ignore_duplicate_articles = {'url', 'title'}
- extra_css = """
+ extra_css = '''
.label, .auth { font-size:small; color:#202020; }
.figc { font-size:small; }
img {display:block; margin:0 auto;}
- """
+ '''
recipe_specific_options = {
'days': {
diff --git a/recipes/revista22.recipe b/recipes/revista22.recipe
index 7d2a55b2f1..9fe631e7e2 100644
--- a/recipes/revista22.recipe
+++ b/recipes/revista22.recipe
@@ -33,7 +33,7 @@ class Volkskrant(BasicNewsRecipe):
dict(id=['comments']),
dict(name=['script', 'noscript', 'style']),
]
- remove_attributes = ["class", "id", "name", "style"]
+ remove_attributes = ['class', 'id', 'name', 'style']
encoding = 'utf-8'
no_stylesheets = True
ignore_duplicate_articles = {'url'}
diff --git a/recipes/revista_veintitres.recipe b/recipes/revista_veintitres.recipe
index 4a992d78f5..4940889bd0 100644
--- a/recipes/revista_veintitres.recipe
+++ b/recipes/revista_veintitres.recipe
@@ -29,10 +29,10 @@ class Veintitres(BasicNewsRecipe):
auto_cleanup = True
auto_cleanup_keep = '//h1'
resolve_internal_links = True
- INDEX = "https://www.veintitres.com.ar"
- extra_css = """
+ INDEX = 'https://www.veintitres.com.ar'
+ extra_css = '''
img{margin-bottom: 0.8em}
- """
+ '''
conversion_options = {
'comment': description,
diff --git a/recipes/rts.recipe b/recipes/rts.recipe
index 08133c98f7..3ed5fba7d6 100644
--- a/recipes/rts.recipe
+++ b/recipes/rts.recipe
@@ -59,9 +59,9 @@ class RTS(BasicNewsRecipe):
soup.html['xml:lang'] = self.lang
soup.html['lang'] = self.lang
mlang = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Language"), ("content", self.lang)])
+ ('http-equiv', 'Content-Language'), ('content', self.lang)])
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=UTF-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=UTF-8')])
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
return self.adeify_images(soup)
diff --git a/recipes/russiafeed.recipe b/recipes/russiafeed.recipe
index a90067e869..514da5d158 100644
--- a/recipes/russiafeed.recipe
+++ b/recipes/russiafeed.recipe
@@ -25,11 +25,11 @@ class RussiaFeed(BasicNewsRecipe):
publication_type = 'newsportal'
auto_cleanup = True
ignore_duplicate_articles = {'url'}
- extra_css = """
+ extra_css = '''
body{font-family: Roboto, Arial, sans-serif}
img{margin-top:1em; margin-bottom: 1em; display:block}
entry-title,entry-subtitle{font-family: Rajdhani, Poppins, Roboto, Arial, sans-serif}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/rzeczpospolita.recipe b/recipes/rzeczpospolita.recipe
index e9e13e04ff..f05b321b3a 100644
--- a/recipes/rzeczpospolita.recipe
+++ b/recipes/rzeczpospolita.recipe
@@ -19,9 +19,9 @@ class RzeczpospolitaRecipe(BasicNewsRecipe):
simultaneous_downloads = 5
feeds = []
- feeds.append((u"Wiadomości", u'http://www.rp.pl/rss/1056')) # Wydarzenia
- feeds.append((u"Ekonomia", u'http://www.rp.pl/rss/1004')) # Ekonomia
- feeds.append((u"Prawo", u'http://www.rp.pl/rss/1037')) # Prawo
+ feeds.append((u'Wiadomości', u'http://www.rp.pl/rss/1056')) # Wydarzenia
+ feeds.append((u'Ekonomia', u'http://www.rp.pl/rss/1004')) # Ekonomia
+ feeds.append((u'Prawo', u'http://www.rp.pl/rss/1037')) # Prawo
keep_only_tags = []
keep_only_tags.append(dict(name='h1', attrs={'id': 'article-title'}))
diff --git a/recipes/saskatoon_star_phoenix.recipe b/recipes/saskatoon_star_phoenix.recipe
index 6592ef51aa..6b89e889c7 100644
--- a/recipes/saskatoon_star_phoenix.recipe
+++ b/recipes/saskatoon_star_phoenix.recipe
@@ -123,24 +123,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('&#x2019;', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -180,14 +180,14 @@ class CanWestPaper(BasicNewsRecipe):
ans = ['News']
# Find each instance of class="sectiontitle", class="featurecontent"
- for divtag in soup.findAll('div', attrs={'class': ["section_title02", "featurecontent"]}):
+ for divtag in soup.findAll('div', attrs={'class': ['section_title02', 'featurecontent']}):
if ''.join(divtag['class']).startswith('section_title'):
# div contains section title
if not divtag.h3:
continue
key = self.tag_to_string(divtag.h3, False)
ans.append(key)
- self.log("Section name %s" % key)
+ self.log('Section name %s' % key)
continue
# div contains article data
h1tag = divtag.find('h1')
diff --git a/recipes/science_news.recipe b/recipes/science_news.recipe
index f3de3c20b5..50198a3d19 100644
--- a/recipes/science_news.recipe
+++ b/recipes/science_news.recipe
@@ -12,8 +12,8 @@ from calibre.web.feeds.news import BasicNewsRecipe, prefixed_classes
class ScienceNewsIssue(BasicNewsRecipe):
title = u'Science News'
- description = ("Science News is an award-winning bi-weekly newsmagazine covering the most important research"
- " in all fields of science. This recipe downloads all the articles from the latest issue.")
+ description = ('Science News is an award-winning bi-weekly newsmagazine covering the most important research'
+ ' in all fields of science. This recipe downloads all the articles from the latest issue.')
category = u'Science, Technology, News'
publisher = u'Society for Science & the Public'
language = 'en'
@@ -54,16 +54,16 @@ class ScienceNewsIssue(BasicNewsRecipe):
# Get articles
soup = self.index_to_soup(url)
soup = soup.find('main', attrs={'id':'content'})
- re_article = re.compile("https://www.sciencenews.org/article/")
+ re_article = re.compile('https://www.sciencenews.org/article/')
stories = []
past_urls = set()
for sec in soup.find_all(href=re_article):
- article_url = sec["href"]
+ article_url = sec['href']
article_title = sec.text.strip()
# Ignore image URLs which do not have text title
- if article_title == "":
+ if article_title == '':
continue
# Ignore if link is a duplicate
@@ -73,12 +73,12 @@ class ScienceNewsIssue(BasicNewsRecipe):
past_urls.add(article_url)
self.log('\t', article_title, ' ', article_url)
article_info = {
- "url": article_url,
- "title": article_title,
+ 'url': article_url,
+ 'title': article_title,
}
stories.append(article_info)
index = [
- ("Articles", stories),
+ ('Articles', stories),
]
return index
diff --git a/recipes/scientific_american.recipe b/recipes/scientific_american.recipe
index 4a0374ec70..07b7c544e4 100644
--- a/recipes/scientific_american.recipe
+++ b/recipes/scientific_american.recipe
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-__license__ = "GPL v3"
+__license__ = 'GPL v3'
import json
from datetime import datetime
@@ -9,29 +9,29 @@ from calibre.web.feeds.news import BasicNewsRecipe, prefixed_classes
class ScientificAmerican(BasicNewsRecipe):
- title = "Scientific American"
- description = "Popular Science. Monthly magazine. Should be downloaded around the middle of each month."
- category = "science"
- __author__ = "Kovid Goyal"
+ title = 'Scientific American'
+ description = 'Popular Science. Monthly magazine. Should be downloaded around the middle of each month.'
+ category = 'science'
+ __author__ = 'Kovid Goyal'
no_stylesheets = True
- language = "en"
- publisher = "Nature Publishing Group"
+ language = 'en'
+ publisher = 'Nature Publishing Group'
remove_empty_feeds = True
remove_javascript = True
- timefmt = " [%B %Y]"
- remove_attributes = ["height", "width"]
+ timefmt = ' [%B %Y]'
+ remove_attributes = ['height', 'width']
masthead_url = (
- "https://static.scientificamerican.com/sciam/assets/Image/newsletter/salogo.png"
+ 'https://static.scientificamerican.com/sciam/assets/Image/newsletter/salogo.png'
)
- extra_css = """
+ extra_css = '''
[class^="article_dek-"] { font-style:italic; color:#202020; }
[class^="article_authors-"] {font-size:small; color:#202020; }
[class^="article__image-"], [class^="lead_image-"], .calibre-nuked-tag-figcaption { font-size:small; }
[class^="bio-"] { font-size:small; color:#404040; }
em, blockquote { color:#202020; }
- """
+ '''
- needs_subscription = "optional"
+ needs_subscription = 'optional'
keep_only_tags = [
prefixed_classes(
@@ -59,10 +59,10 @@ class ScientificAmerican(BasicNewsRecipe):
def get_browser(self, *args):
br = BasicNewsRecipe.get_browser(self)
if self.username and self.password:
- br.open("https://www.scientificamerican.com/account/login/")
- br.select_form(predicate=lambda f: f.attrs.get("id") == "login")
- br["emailAddress"] = self.username
- br["password"] = self.password
+ br.open('https://www.scientificamerican.com/account/login/')
+ br.select_form(predicate=lambda f: f.attrs.get('id') == 'login')
+ br['emailAddress'] = self.username
+ br['password'] = self.password
br.submit()
return br
@@ -87,50 +87,50 @@ class ScientificAmerican(BasicNewsRecipe):
if d and isinstance(d, str):
issue = d
else:
- fp_soup = self.index_to_soup("https://www.scientificamerican.com")
+ fp_soup = self.index_to_soup('https://www.scientificamerican.com')
curr_issue_link = fp_soup.find(**prefixed_classes('latest_issue_links-'))
if not curr_issue_link:
- self.abort_recipe_processing("Unable to find issue link")
- issue = 'https://www.scientificamerican.com' + curr_issue_link.a["href"]
+ self.abort_recipe_processing('Unable to find issue link')
+ issue = 'https://www.scientificamerican.com' + curr_issue_link.a['href']
soup = self.index_to_soup(issue)
- script = soup.find("script", id="__DATA__")
+ script = soup.find('script', id='__DATA__')
if not script:
- self.abort_recipe_processing("Unable to find script")
+ self.abort_recipe_processing('Unable to find script')
- JSON = script.contents[0].split('JSON.parse(`')[1].replace("\\\\", "\\")
+ JSON = script.contents[0].split('JSON.parse(`')[1].replace('\\\\', '\\')
data = json.JSONDecoder().raw_decode(JSON)[0]
issue_info = (
data
- .get("initialData", {})
- .get("issueData", {})
+ .get('initialData', {})
+ .get('issueData', {})
)
if not issue_info:
- self.abort_recipe_processing("Unable to find issue info")
+ self.abort_recipe_processing('Unable to find issue info')
- self.cover_url = issue_info["image_url"] + "?w=800"
+ self.cover_url = issue_info['image_url'] + '?w=800'
- edition_date = datetime.strptime(issue_info["issue_date"], "%Y-%m-%d")
- self.timefmt = f" [{edition_date:%B %Y}]"
+ edition_date = datetime.strptime(issue_info['issue_date'], '%Y-%m-%d')
+ self.timefmt = f' [{edition_date:%B %Y}]'
feeds = {}
- for section in issue_info.get("article_previews", {}):
- for article in issue_info.get("article_previews", {}).get(section, []):
- self.log('\t', article["title"])
+ for section in issue_info.get('article_previews', {}):
+ for article in issue_info.get('article_previews', {}).get(section, []):
+ self.log('\t', article['title'])
if section.startswith('featur'):
feed_name = section.capitalize()
else:
- feed_name = article["category"]
+ feed_name = article['category']
if feed_name not in feeds:
feeds[feed_name] = []
feeds[feed_name].append(
{
- "title": article["title"],
- "url": urljoin(
- "https://www.scientificamerican.com/article/",
- article["slug"],
+ 'title': article['title'],
+ 'url': urljoin(
+ 'https://www.scientificamerican.com/article/',
+ article['slug'],
),
- "description": article["summary"],
+ 'description': article['summary'],
}
)
sorted_feeds = dict(sorted(feeds.items(), key=lambda x: (not x[0].startswith('Featur'), x[0])))
diff --git a/recipes/scmp.recipe b/recipes/scmp.recipe
index b8012e119e..8856c1543f 100644
--- a/recipes/scmp.recipe
+++ b/recipes/scmp.recipe
@@ -1,7 +1,7 @@
#!/usr/bin/env python
-"""
+'''
scmp.com
-"""
+'''
import json
import time
@@ -74,8 +74,8 @@ def load_article_from_json(raw, root):
class SCMP(BasicNewsRecipe):
- title = "South China Morning Post"
- __author__ = "unkn0wn"
+ title = 'South China Morning Post'
+ __author__ = 'unkn0wn'
description = (
'The South China Morning Post is a leading news media company that has reported on China and Asia '
'for more than a century with global impact. Founded in 1903, SCMP is headquartered in Hong Kong, '
@@ -84,18 +84,18 @@ class SCMP(BasicNewsRecipe):
'and inspiring through journalism of the highest standards. Our vision is to “Elevate Thought”, '
'and our mission is to “Lead the global conversation about China”.'
)
- publisher = "South China Morning Post Publishers Ltd."
+ publisher = 'South China Morning Post Publishers Ltd.'
oldest_article = 1
no_stylesheets = True
remove_javascript = True
remove_attributes = ['width', 'height']
- encoding = "utf-8"
+ encoding = 'utf-8'
use_embedded_content = False
- language = "en_HK"
+ language = 'en_HK'
remove_empty_feeds = True
resolve_internal_links = True
- publication_type = "newspaper"
- ignore_duplicate_articles = {"title", "url"}
+ publication_type = 'newspaper'
+ ignore_duplicate_articles = {'title', 'url'}
extra_css = 'blockquote, em { color: #202020; }'
masthead_url = 'https://upload.wikimedia.org/wikipedia/commons/c/c3/SCMP_logo.svg'
@@ -139,29 +139,29 @@ class SCMP(BasicNewsRecipe):
remove_tags = [
dict(
classes(
- "sticky-wrap relative social-media social-media--extended__shares"
- " article-body-comment scmp_button_comment_wrapper social-media--extended__in-site"
- " footer scmp-advert-tile sidebar-col related-article share-widget"
+ 'sticky-wrap relative social-media social-media--extended__shares'
+ ' article-body-comment scmp_button_comment_wrapper social-media--extended__in-site'
+ ' footer scmp-advert-tile sidebar-col related-article share-widget'
)
),
- dict(attrs={"addthis_title": True}),
- dict(name=["script", "style"]),
+ dict(attrs={'addthis_title': True}),
+ dict(name=['script', 'style']),
]
# https://www.scmp.com/rss
feeds = [
- ("Hong Kong", "https://www.scmp.com/rss/2/feed"),
- ("China", "https://www.scmp.com/rss/4/feed"),
- ("Asia", "https://www.scmp.com/rss/3/feed"),
- ("World", "https://www.scmp.com/rss/5/feed"),
- ("Business", "https://www.scmp.com/rss/92/feed"),
- ("Tech", "https://www.scmp.com/rss/36/feed"),
- ("Life", "https://www.scmp.com/rss/94/feed"),
- ("Culture", "https://www.scmp.com/rss/322296/feed"),
- ("Sport", "https://www.scmp.com/rss/95/feed"),
- ("Post Mag", "https://www.scmp.com/rss/71/feed"),
- ("Style", "https://www.scmp.com/rss/72/feed"),
- ("News", 'https://www.scmp.com/rss/91/feed')
+ ('Hong Kong', 'https://www.scmp.com/rss/2/feed'),
+ ('China', 'https://www.scmp.com/rss/4/feed'),
+ ('Asia', 'https://www.scmp.com/rss/3/feed'),
+ ('World', 'https://www.scmp.com/rss/5/feed'),
+ ('Business', 'https://www.scmp.com/rss/92/feed'),
+ ('Tech', 'https://www.scmp.com/rss/36/feed'),
+ ('Life', 'https://www.scmp.com/rss/94/feed'),
+ ('Culture', 'https://www.scmp.com/rss/322296/feed'),
+ ('Sport', 'https://www.scmp.com/rss/95/feed'),
+ ('Post Mag', 'https://www.scmp.com/rss/71/feed'),
+ ('Style', 'https://www.scmp.com/rss/72/feed'),
+ ('News', 'https://www.scmp.com/rss/91/feed')
]
def print_version(self, url):
diff --git a/recipes/scprint.recipe b/recipes/scprint.recipe
index e4a2edc3b2..c7497aa6f3 100644
--- a/recipes/scprint.recipe
+++ b/recipes/scprint.recipe
@@ -5,7 +5,7 @@ class SCPrintMagazine(BasicNewsRecipe):
title = u'SC Print Magazine'
__author__ = u'Tony Maro'
description = u'Last print version of the data security magazine'
- INDEX = "http://www.scmagazineus.com/issuearchive/"
+ INDEX = 'http://www.scmagazineus.com/issuearchive/'
no_stylesheets = True
language = 'en'
keep_only_tags = [dict(id=['article', 'review'])]
@@ -52,9 +52,9 @@ class SCPrintMagazine(BasicNewsRecipe):
if mylink.get('href'):
artlink = mylink['href']
artlink = artlink.replace(
- "/article", "/printarticle")
+ '/article', '/printarticle')
artlink = artlink.replace(
- "/review", "/printreview")
+ '/review', '/printreview')
deck = onearticle.find(
'div', attrs={'class': 'deck'})
if deck is not None:
@@ -72,7 +72,7 @@ class SCPrintMagazine(BasicNewsRecipe):
br['ctl00$ctl00$cphAllPageContent$cphMainContent$SubscriberEasyLoginView1$txtEmail'] = self.username
br['ctl00$ctl00$cphAllPageContent$cphMainContent$SubscriberEasyLoginView1$txtPassword'] = self.password
raw = br.submit(
- "ctl00$ctl00$cphAllPageContent$cphMainContent$SubscriberEasyLoginView1$btnLogin").read()
+ 'ctl00$ctl00$cphAllPageContent$cphMainContent$SubscriberEasyLoginView1$btnLogin').read()
if 'Logout' not in raw:
raise LoginFailed(
_('Failed to log in, check your username and password for'
diff --git a/recipes/screen_rant.recipe b/recipes/screen_rant.recipe
index 0df33a3337..9ffc250503 100644
--- a/recipes/screen_rant.recipe
+++ b/recipes/screen_rant.recipe
@@ -7,7 +7,7 @@ class AdvancedUserRecipe1716109928(BasicNewsRecipe):
title = 'Screen Rant'
description = (
'ScreenRant is a digital publication. ScreenRant has grown into one of the'
- " world’s most prominent entertainment news sources. ScreenRant don’t just break and report news;"
+ ' world’s most prominent entertainment news sources. ScreenRant don’t just break and report news;'
' they analyze and editorialize it with unique insight and inside information.')
__author__ = 'Spicy Poison'
encoding = 'utf-8'
diff --git a/recipes/seminar_magazine.recipe b/recipes/seminar_magazine.recipe
index 7fc77f46d0..0737a9dce9 100644
--- a/recipes/seminar_magazine.recipe
+++ b/recipes/seminar_magazine.recipe
@@ -22,7 +22,7 @@ class Seminar(BasicNewsRecipe):
soup = self.index_to_soup('https://www.india-seminar.com/')
citem = soup.find('img', src=lambda x: x and 'covers' in x)
if citem:
- cover_url = "https://www.india-seminar.com/" + citem['src']
+ cover_url = 'https://www.india-seminar.com/' + citem['src']
return cover_url
def parse_index(self):
diff --git a/recipes/sign_of_the_times.recipe b/recipes/sign_of_the_times.recipe
index c29678fd8f..f6a1dc3e11 100644
--- a/recipes/sign_of_the_times.recipe
+++ b/recipes/sign_of_the_times.recipe
@@ -10,11 +10,11 @@ class SignOfTheTimes(BasicNewsRecipe):
max_articles_per_feed = 50
use_embedded_content = False
- extra_css = """
+ extra_css = '''
h2{font-size: large; margin: .2em 0; text-decoration: none;}
.image-caption{font-size: medium; font-style:italic; margin: 0 0 1em 0;}
.article-info{font-size: small; font-style:italic; margin: 0 0 .5em 0;}
- """
+ '''
remove_stylesheets = True
remove_tags = [
diff --git a/recipes/singtaohk.recipe b/recipes/singtaohk.recipe
index 9372f3a9b1..abfb386697 100644
--- a/recipes/singtaohk.recipe
+++ b/recipes/singtaohk.recipe
@@ -5,7 +5,7 @@ from calibre.web.feeds.news import BasicNewsRecipe, classes
class STHKRecipe(BasicNewsRecipe):
title = '星島日報 (香港)'
__author__ = 'unkn0wn'
- description = 'The Sing Tao Daily is among Hong Kong\'s oldest Chinese language newspapers. (https://std.stheadline.com/)'
+ description = "The Sing Tao Daily is among Hong Kong's oldest Chinese language newspapers. (https://std.stheadline.com/)"
category = 'Chinese, News, Hong Kong'
language = 'zh'
encoding = 'utf-8'
diff --git a/recipes/skeptical_enquirer.recipe b/recipes/skeptical_enquirer.recipe
index af22b4fcb4..d3cce85add 100644
--- a/recipes/skeptical_enquirer.recipe
+++ b/recipes/skeptical_enquirer.recipe
@@ -25,14 +25,14 @@ class FreeInquiry(BasicNewsRecipe):
ignore_duplicate_articles = {'url'}
remove_empty_feeds = True
needs_subscription = True
- extra_css = """
+ extra_css = '''
.entry-header{
text-transform: uppercase;
vertical-align: baseline;
display: inline;
}
ul li{display: inline}
- """
+ '''
remove_tags = [
classes(
diff --git a/recipes/smh.recipe b/recipes/smh.recipe
index b732905229..ce1f422889 100644
--- a/recipes/smh.recipe
+++ b/recipes/smh.recipe
@@ -1,50 +1,50 @@
-__license__ = "GPL v3"
-__copyright__ = "2010-2011, Darko Miletic
"
-"""
+__license__ = 'GPL v3'
+__copyright__ = '2010-2011, Darko Miletic '
+'''
smh.com.au
-"""
+'''
from calibre.web.feeds.news import BasicNewsRecipe
class Smh_au(BasicNewsRecipe):
- title = "The Sydney Morning Herald"
- __author__ = "Darko Miletic"
- description = "Breaking news from Sydney, Australia and the world. Features the latest business, sport, entertainment, travel, lifestyle, and technology news." # noqa: E501
- publisher = "Fairfax Digital"
- category = "news, politics, Australia, Sydney"
+ title = 'The Sydney Morning Herald'
+ __author__ = 'Darko Miletic'
+ description = 'Breaking news from Sydney, Australia and the world. Features the latest business, sport, entertainment, travel, lifestyle, and technology news.' # noqa: E501
+ publisher = 'Fairfax Digital'
+ category = 'news, politics, Australia, Sydney'
oldest_article = 2
max_articles_per_feed = 200
no_stylesheets = True
- ignore_duplicate_articles = {"title", "url"}
+ ignore_duplicate_articles = {'title', 'url'}
use_embedded_content = False
- encoding = "utf-8"
+ encoding = 'utf-8'
- language = "en_AU"
+ language = 'en_AU'
remove_empty_feeds = True
- masthead_url = "http://images.smh.com.au/2010/02/02/1087188/smh-620.jpg"
- publication_type = "newspaper"
+ masthead_url = 'http://images.smh.com.au/2010/02/02/1087188/smh-620.jpg'
+ publication_type = 'newspaper'
- keep_only_tags = [dict(name="article")]
+ keep_only_tags = [dict(name='article')]
remove_tags = [
- dict(name=["button"]),
- dict(id=["saveTooltip"]),
- dict(attrs={"class": "noPrint"}),
+ dict(name=['button']),
+ dict(id=['saveTooltip']),
+ dict(attrs={'class': 'noPrint'}),
]
# https://www.smh.com.au/rssheadlines
feeds = [
- ("Latest News", "https://www.smh.com.au/rss/feed.xml"),
- ("Federal Politics", "https://www.smh.com.au/rss/politics/federal.xml"),
- ("NSW News", "https://www.smh.com.au/rss/national/nsw.xml"),
- ("World", "https://www.smh.com.au/rss/world.xml"),
- ("National", "https://www.smh.com.au/rss/national.xml"),
- ("Business", "https://www.smh.com.au/rss/business.xml"),
- ("Culture", "https://www.smh.com.au/rss/culture.xml"),
- ("Technology", "https://www.smh.com.au/rss/technology.xml"),
- ("Environment", "https://www.smh.com.au/rss/environment.xml"),
- ("Lifestyle", "https://www.smh.com.au/rss/lifestyle.xml"),
- ("Property", "https://www.smh.com.au/rss/property.xml"),
- ("Sport", "https://www.smh.com.au/rss/sport.xml"),
- ("Ruby League", "https://www.smh.com.au/rss/sport/nrl.xml"),
- ("AFL", "https://www.smh.com.au/rss/sport/afl.xml"),
+ ('Latest News', 'https://www.smh.com.au/rss/feed.xml'),
+ ('Federal Politics', 'https://www.smh.com.au/rss/politics/federal.xml'),
+ ('NSW News', 'https://www.smh.com.au/rss/national/nsw.xml'),
+ ('World', 'https://www.smh.com.au/rss/world.xml'),
+ ('National', 'https://www.smh.com.au/rss/national.xml'),
+ ('Business', 'https://www.smh.com.au/rss/business.xml'),
+ ('Culture', 'https://www.smh.com.au/rss/culture.xml'),
+ ('Technology', 'https://www.smh.com.au/rss/technology.xml'),
+ ('Environment', 'https://www.smh.com.au/rss/environment.xml'),
+ ('Lifestyle', 'https://www.smh.com.au/rss/lifestyle.xml'),
+ ('Property', 'https://www.smh.com.au/rss/property.xml'),
+ ('Sport', 'https://www.smh.com.au/rss/sport.xml'),
+ ('Ruby League', 'https://www.smh.com.au/rss/sport/nrl.xml'),
+ ('AFL', 'https://www.smh.com.au/rss/sport/afl.xml'),
]
diff --git a/recipes/sol_haber.recipe b/recipes/sol_haber.recipe
index 832d512974..164ff1da5f 100644
--- a/recipes/sol_haber.recipe
+++ b/recipes/sol_haber.recipe
@@ -69,7 +69,7 @@ class SolHaberRecipe(BasicNewsRecipe):
dict(name='div', attrs={'class': re.compile(storybody_reg_exp, re.IGNORECASE)})]
def get_masthead_title(self):
- return self.title + "(" + self.end_date + ")"
+ return self.title + '(' + self.end_date + ')'
def parse_index(self):
diff --git a/recipes/star_gazetesi.recipe b/recipes/star_gazetesi.recipe
index 86a7b9955a..4a4243e964 100644
--- a/recipes/star_gazetesi.recipe
+++ b/recipes/star_gazetesi.recipe
@@ -19,37 +19,37 @@ class Star (BasicNewsRecipe):
compress_news_images = True
# h1 and h2 tag classes respectively
- extra_css = ".title-1 {font-size: 20px; color: #ed0000; text-align: center;}"
- extra_css += ".title-2 {font-size: 16px;}"
+ extra_css = '.title-1 {font-size: 20px; color: #ed0000; text-align: center;}'
+ extra_css += '.title-2 {font-size: 16px;}'
keep_only_tags = [
- dict(name="h1", attrs={"class": "title-1"}),
- dict(name="h2", attrs={"class": "title-2"}),
- dict(name="div", attrs={"class": "time"}),
- dict(name="img", attrs={"class": "margin-bottom-lg"}),
- dict(name="div", attrs={"class": "detay"}),
+ dict(name='h1', attrs={'class': 'title-1'}),
+ dict(name='h2', attrs={'class': 'title-2'}),
+ dict(name='div', attrs={'class': 'time'}),
+ dict(name='img', attrs={'class': 'margin-bottom-lg'}),
+ dict(name='div', attrs={'class': 'detay'}),
]
feeds = [
- ("MANSET", "http://www.star.com.tr/rss/mansetler.xml"),
- ("GÜNCEL", "http://www.star.com.tr/rss/guncel.xml"),
- ("POLİTİKA", "http://www.star.com.tr/rss/politika.xml"),
- ("EKONOMİ", "http://www.star.com.tr/rss/ekonomi.xml"),
- ("DÜNYA", "http://www.star.com.tr/rss/dunya.xml"),
- ("SON DAKİKA", "http://www.star.com.tr/rss/sondakika.xml"),
- ("YAZARLAR", "http://www.star.com.tr/rss/yazarlar.xml"),
- ("SPOR", "http://www.star.com.tr/rss/spor.xml"),
- ("SİNEMA", "http://www.star.com.tr/rss/sinema.xml"),
- ("SANAT", "http://www.star.com.tr/rss/sanat.xml"),
- ("MAGAZİN", "http://www.star.com.tr/rss/magazin.xml"),
- ("MEDYA", "http://www.star.com.tr/rss/medya.xml"),
- ("SAĞLIK", "http://www.star.com.tr/rss/saglik.xml"),
- ("TEKNOLOJİ", "http://www.star.com.tr/rss/teknoloji.xml"),
- ("AÇIK GÖRÜŞ", "http://www.star.com.tr/rss/acikgorus.xml"),
- ("PAZAR", "http://www.star.com.tr/rss/pazar.xml"),
- ("CUMARTESİ", "http://www.star.com.tr/rss/cumartesi.xml"),
- ("DİZİ", "http://www.star.com.tr/rss/dizi.xml"),
- ("ANKARA", "http://www.star.com.tr/rss/ankara.xml"),
- ("MEMURLAR", "http://www.star.com.tr/rss/memurlar.xml"),
- ("OTO HAYAT", "http://www.star.com.tr/rss/otohayat.xml"),
+ ('MANSET', 'http://www.star.com.tr/rss/mansetler.xml'),
+ ('GÜNCEL', 'http://www.star.com.tr/rss/guncel.xml'),
+ ('POLİTİKA', 'http://www.star.com.tr/rss/politika.xml'),
+ ('EKONOMİ', 'http://www.star.com.tr/rss/ekonomi.xml'),
+ ('DÜNYA', 'http://www.star.com.tr/rss/dunya.xml'),
+ ('SON DAKİKA', 'http://www.star.com.tr/rss/sondakika.xml'),
+ ('YAZARLAR', 'http://www.star.com.tr/rss/yazarlar.xml'),
+ ('SPOR', 'http://www.star.com.tr/rss/spor.xml'),
+ ('SİNEMA', 'http://www.star.com.tr/rss/sinema.xml'),
+ ('SANAT', 'http://www.star.com.tr/rss/sanat.xml'),
+ ('MAGAZİN', 'http://www.star.com.tr/rss/magazin.xml'),
+ ('MEDYA', 'http://www.star.com.tr/rss/medya.xml'),
+ ('SAĞLIK', 'http://www.star.com.tr/rss/saglik.xml'),
+ ('TEKNOLOJİ', 'http://www.star.com.tr/rss/teknoloji.xml'),
+ ('AÇIK GÖRÜŞ', 'http://www.star.com.tr/rss/acikgorus.xml'),
+ ('PAZAR', 'http://www.star.com.tr/rss/pazar.xml'),
+ ('CUMARTESİ', 'http://www.star.com.tr/rss/cumartesi.xml'),
+ ('DİZİ', 'http://www.star.com.tr/rss/dizi.xml'),
+ ('ANKARA', 'http://www.star.com.tr/rss/ankara.xml'),
+ ('MEMURLAR', 'http://www.star.com.tr/rss/memurlar.xml'),
+ ('OTO HAYAT', 'http://www.star.com.tr/rss/otohayat.xml'),
]
diff --git a/recipes/strange_horizons.recipe b/recipes/strange_horizons.recipe
index d5ff6dcb88..1fd922b2c1 100644
--- a/recipes/strange_horizons.recipe
+++ b/recipes/strange_horizons.recipe
@@ -56,7 +56,7 @@ class StrangeHorizons(BasicNewsRecipe):
desc = ''
if exp := ti.find_next_sibling(**classes('excerpt')):
desc = self.tag_to_string(exp) + desc
- desc = re.sub(r"\d{5} ", "", desc)
+ desc = re.sub(r'\d{5} ', '', desc)
if auth := ti.find_next_sibling(**classes('author')):
desc = self.tag_to_string(auth) + ' | ' + desc
@@ -64,5 +64,5 @@ class StrangeHorizons(BasicNewsRecipe):
continue
self.log(sec, '\n\t', title, '\n\t', desc, '\n\t\t', url)
- feeds_dict[sec].append({"title": title, "url": url, "description": desc})
+ feeds_dict[sec].append({'title': title, 'url': url, 'description': desc})
return [(section, articles) for section, articles in feeds_dict.items()]
diff --git a/recipes/taz_rss.recipe b/recipes/taz_rss.recipe
index 8b256afc46..b9e497ca77 100644
--- a/recipes/taz_rss.recipe
+++ b/recipes/taz_rss.recipe
@@ -47,7 +47,7 @@ class TazRSSRecipe(BasicNewsRecipe):
no_stylesheets = True # default value is False, but True makes process much faster
keep_only_tags = [
dict(name=['div'], attrs={
- 'class': re.compile(r".*\bsect_article\b.*")})
+ 'class': re.compile(r'.*\bsect_article\b.*')})
]
remove_tags = [
dict(name=['div'], attrs={'class': 'sectfoot'}),
diff --git a/recipes/telepolis.recipe b/recipes/telepolis.recipe
index a62cccbb79..3df56c972e 100644
--- a/recipes/telepolis.recipe
+++ b/recipes/telepolis.recipe
@@ -22,4 +22,4 @@ class Telepolis(BasicNewsRecipe):
remove_tags = [dict(name='p', attrs={'class':'printversion__back-to-article printversion--hide'})]
def get_article_url(self, article):
- return article.link + "&view=print"
+ return article.link + '&view=print'
diff --git a/recipes/thairath.recipe b/recipes/thairath.recipe
index 0fe46d6faf..d097d09927 100644
--- a/recipes/thairath.recipe
+++ b/recipes/thairath.recipe
@@ -57,4 +57,4 @@ class AdvancedUserRecipe1271637235(BasicNewsRecipe):
remove_tags.append(dict(name='id', attrs={'class': 'footer'}))
remove_tags.append(
- dict(name="ul", attrs={'id': 'banner-highlights-images'}))
+ dict(name='ul', attrs={'id': 'banner-highlights-images'}))
diff --git a/recipes/the_diplomat.recipe b/recipes/the_diplomat.recipe
index fde4a0f1a7..a386d3bf6b 100644
--- a/recipes/the_diplomat.recipe
+++ b/recipes/the_diplomat.recipe
@@ -27,7 +27,7 @@ class Diplomat(BasicNewsRecipe):
soup = self.index_to_soup('https://thediplomat.com')
tag = soup.find(attrs={'class': 'td-nav-mag'})
if tag:
- url = tag.find('img')['src'].split("/")[-1]
+ url = tag.find('img')['src'].split('/')[-1]
self.cover_url = ('https://magazine.thediplomat.com/media/1080/' +
url)
# /ads/magazine/cover/td-mag-s-1/issue_89_cover.jpg
@@ -67,5 +67,5 @@ class Diplomat(BasicNewsRecipe):
def preprocess_html(self, soup):
for img in soup.findAll('img', attrs={'src': True}):
- img['src'] = img['src'].replace("td-story-s-1", "td-story-s-2")
+ img['src'] = img['src'].replace('td-story-s-1', 'td-story-s-2')
return soup
diff --git a/recipes/the_federalist.recipe b/recipes/the_federalist.recipe
index 6fa4010168..d77f27b13c 100644
--- a/recipes/the_federalist.recipe
+++ b/recipes/the_federalist.recipe
@@ -23,10 +23,10 @@ class Federalist(BasicNewsRecipe):
use_embedded_content = False
remove_attributes = ['xmlns', 'lang', 'style', 'width', 'height']
- extra_css = """
+ extra_css = '''
.shortbio,.article-excerpt{font-style: italic}
.article-author-details,.article-author-description,.article-meta-author,.article-meta-date,.article-thumbnail-caption{font-size: small}
- """
+ '''
keep_only_tags = [
classes(
diff --git a/recipes/the_nation.recipe b/recipes/the_nation.recipe
index 20a4430cb3..ba10ce0ded 100644
--- a/recipes/the_nation.recipe
+++ b/recipes/the_nation.recipe
@@ -28,11 +28,11 @@ class Thenation(BasicNewsRecipe):
login_url = 'http://www.thenation.com/user?destination=%3Cfront%3E'
publication_type = 'magazine'
needs_subscription = 'optional'
- exra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif;}
.print-created{font-size: small;}
.caption{display: block; font-size: x-small;}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
@@ -47,7 +47,7 @@ class Thenation(BasicNewsRecipe):
]
remove_attributes = ['lang']
- feeds = [(u"Articles", u'http://www.thenation.com/rss/articles')]
+ feeds = [(u'Articles', u'http://www.thenation.com/rss/articles')]
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
diff --git a/recipes/the_philippine_daily_inquirer.recipe b/recipes/the_philippine_daily_inquirer.recipe
index 70a6b84cc5..222414159a 100644
--- a/recipes/the_philippine_daily_inquirer.recipe
+++ b/recipes/the_philippine_daily_inquirer.recipe
@@ -5,7 +5,7 @@ from calibre.web.feeds.recipes import BasicNewsRecipe
class PhilippineDailyInquirer(BasicNewsRecipe):
title = 'The Philippine Daily Inquirer'
- custom_title = "The Philippine Daily Inquirer - " + \
+ custom_title = 'The Philippine Daily Inquirer - ' + \
time.strftime('%d %b %Y %I:%M %p')
__author__ = 'jde'
__date__ = '03 June 2012'
diff --git a/recipes/the_saturday_paper.recipe b/recipes/the_saturday_paper.recipe
index 46276e4768..d71e09446f 100644
--- a/recipes/the_saturday_paper.recipe
+++ b/recipes/the_saturday_paper.recipe
@@ -32,7 +32,7 @@ class SaturdayPaper(BasicNewsRecipe):
' article-page__sidebar article-page__social__icons share-wrapper article-footer-container')
]
remove_tags_after = [
- {"name": "div", "class": "end-matter"},
+ {'name': 'div', 'class': 'end-matter'},
]
@@ -62,7 +62,7 @@ class SaturdayPaper(BasicNewsRecipe):
title = a.find(class_='article__title')
title = self.tag_to_string(title)
- url = a.find(class_="article__title_link")
+ url = a.find(class_='article__title_link')
if url is None:
continue
url = url['href']
diff --git a/recipes/the_week_magazine_free.recipe b/recipes/the_week_magazine_free.recipe
index 47bc045d2d..35718ae649 100644
--- a/recipes/the_week_magazine_free.recipe
+++ b/recipes/the_week_magazine_free.recipe
@@ -9,7 +9,7 @@ class TheWeek(BasicNewsRecipe):
title = 'The Week'
__author__ = 'unkn0wn'
description = (
- 'The Week is for readers who want to know what\'s going on in the world, without having to read '
+ "The Week is for readers who want to know what's going on in the world, without having to read "
'several daily newspapers or get wrapped up in the endless news cycle. For every important story, '
'our editors carefully select commentary from all sides of the debate and artfully stitch them together '
'into one concise read. By showing you every perspective, we enable you to form your own opinion.'
diff --git a/recipes/the_week_uk.recipe b/recipes/the_week_uk.recipe
index 674d44f88c..32f4ca0495 100644
--- a/recipes/the_week_uk.recipe
+++ b/recipes/the_week_uk.recipe
@@ -9,7 +9,7 @@ class TheWeek(BasicNewsRecipe):
title = 'The Week'
__author__ = 'unkn0wn'
description = (
- 'The Week is for readers who want to know what\'s going on in the world, without having to read '
+ "The Week is for readers who want to know what's going on in the world, without having to read "
'several daily newspapers or get wrapped up in the endless news cycle. For every important story, '
'our editors carefully select commentary from all sides of the debate and artfully stitch them together '
'into one concise read. By showing you every perspective, we enable you to form your own opinion.'
diff --git a/recipes/theecocolapse.recipe b/recipes/theecocolapse.recipe
index 9131883289..d90a340282 100644
--- a/recipes/theecocolapse.recipe
+++ b/recipes/theecocolapse.recipe
@@ -20,10 +20,10 @@ class TheEconomicCollapse(BasicNewsRecipe):
use_embedded_content = False
language = 'en'
remove_empty_feeds = True
- extra_css = """
+ extra_css = '''
body{font-family: Tahoma,Arial,sans-serif }
img{margin-bottom: 0.4em; display: block;}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
diff --git a/recipes/theeconomictimes_india.recipe b/recipes/theeconomictimes_india.recipe
index 4a4762cc90..b824d1258f 100644
--- a/recipes/theeconomictimes_india.recipe
+++ b/recipes/theeconomictimes_india.recipe
@@ -54,7 +54,7 @@ class TheEconomicTimes(BasicNewsRecipe):
def preprocess_html(self, soup):
for image in soup.findAll('img', attrs={'src': True}):
- image['src'] = image['src'].replace("width-300", "width-640")
+ image['src'] = image['src'].replace('width-300', 'width-640')
for img in soup.findAll('img', attrs={'data-original': True}):
img['src'] = img['data-original'].replace('photo', 'thumb').replace('quality-100', 'quality-100,width-600,resizemode-4')
return soup
diff --git a/recipes/theeconomictimes_india_print_edition.recipe b/recipes/theeconomictimes_india_print_edition.recipe
index 6895d870fd..415097ea48 100644
--- a/recipes/theeconomictimes_india_print_edition.recipe
+++ b/recipes/theeconomictimes_india_print_edition.recipe
@@ -40,7 +40,7 @@ class TheEconomicTimes(BasicNewsRecipe):
try:
br.open(cover)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
@@ -81,7 +81,7 @@ class TheEconomicTimes(BasicNewsRecipe):
secname = re.sub(r'[0-9]', '', sec)
self.log(secname)
articles = []
- for h3 in section.findAll(("h1", "h3", "h4", "h5")):
+ for h3 in section.findAll(('h1', 'h3', 'h4', 'h5')):
span = h3.find(
'span',
href=lambda x: x and x.startswith('https://economictimes.indiatimes.com/epaper/'),
@@ -102,7 +102,7 @@ class TheEconomicTimes(BasicNewsRecipe):
def preprocess_html(self, soup):
for image in soup.findAll('img', attrs={'src': True}):
- image['src'] = image['src'].replace("width-300", "width-640")
+ image['src'] = image['src'].replace('width-300', 'width-640')
for img in soup.findAll('img', attrs={'data-original': True}):
img['src'] = img['data-original']
return soup
diff --git a/recipes/thenewcriterion.recipe b/recipes/thenewcriterion.recipe
index cb56af5181..732d1ecac7 100644
--- a/recipes/thenewcriterion.recipe
+++ b/recipes/thenewcriterion.recipe
@@ -45,9 +45,9 @@ class TheNewCriterion(BasicNewsRecipe):
fetch_retries = 10
auto_cleanup = True
masthead_url = 'https://www.newcriterion.com/themes/thenewcriterion/assets/img/horizontal-logo.svg'
- extra_css = """
+ extra_css = '''
body{font-family: Galliard, serif}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language
@@ -112,6 +112,6 @@ class TheNewCriterion(BasicNewsRecipe):
self.temp_files.append(tfile)
result = tfile.name
except:
- print("Retrying download...")
+ print('Retrying download...')
count += 1
return result
diff --git a/recipes/theoldie.recipe b/recipes/theoldie.recipe
index 449064a5bf..7882e1b31d 100644
--- a/recipes/theoldie.recipe
+++ b/recipes/theoldie.recipe
@@ -219,31 +219,31 @@ class PrivateEyeRecipe(BasicNewsRecipe):
# We remove vast swathes of HTML which is not part of the articles.
remove_tags_before = [
- {'name': 'div', 'class': "container"},
- {'name': 'div', 'class': "content-wrapper"},
- {'name': 'div', 'class': "only-in-the-magazine"},
+ {'name': 'div', 'class': 'container'},
+ {'name': 'div', 'class': 'content-wrapper'},
+ {'name': 'div', 'class': 'only-in-the-magazine'},
]
remove_tags_after = [
- {'name': 'div', 'class': "container"},
- {'name': 'div', 'class': "content-wrapper"},
- {'name': 'h2', 'string': "Find out more about The Oldie"},
+ {'name': 'div', 'class': 'container'},
+ {'name': 'div', 'class': 'content-wrapper'},
+ {'name': 'h2', 'string': 'Find out more about The Oldie'},
]
# Remove non-sibling content
remove_tags = [
- {'name': 'nav', 'class': "categories"},
- {'name': 'div', 'class': "internal-placeholders"},
- {'name': 'div', 'class': "leaderboard"},
- {'name': 'div', 'class': "share"},
- {'name': 'div', 'class': "most-popular"},
- {'name': 'div', 'class': "article-convert"},
+ {'name': 'nav', 'class': 'categories'},
+ {'name': 'div', 'class': 'internal-placeholders'},
+ {'name': 'div', 'class': 'leaderboard'},
+ {'name': 'div', 'class': 'share'},
+ {'name': 'div', 'class': 'most-popular'},
+ {'name': 'div', 'class': 'article-convert'},
# {'name': 'p', 'class': "article-convert"},
# {'name': 'p', 'class': "meta"},
{'name': 'hr'},
- {'name': 'a', 'class': "view-full-screen"},
- {'name': 'div', 'class': "image-counter"},
- {'name': 'h2', 'string': "Find out more about The Oldie"},
- {'name': 'a', 'href': re.compile(r"^https?:\/\/issuu.com\/")},
- {'name': 'img', 'src': re.compile(r"\/assets\/images\/icons\/icon-")},
+ {'name': 'a', 'class': 'view-full-screen'},
+ {'name': 'div', 'class': 'image-counter'},
+ {'name': 'h2', 'string': 'Find out more about The Oldie'},
+ {'name': 'a', 'href': re.compile(r'^https?:\/\/issuu.com\/')},
+ {'name': 'img', 'src': re.compile(r'\/assets\/images\/icons\/icon-')},
]
# The following extra css is to tweak the formatting of various elements of various article pages.
diff --git a/recipes/tijd.recipe b/recipes/tijd.recipe
index 4a0507343b..987d880b1c 100644
--- a/recipes/tijd.recipe
+++ b/recipes/tijd.recipe
@@ -76,12 +76,12 @@ class DeTijd(BasicNewsRecipe):
soup.html['lang'] = self.lang
soup.html['dir'] = self.direction
mlang = new_tag(
- soup, 'meta', [("http-equiv", "Content-Language"),
- ("content", self.lang)]
+ soup, 'meta', [('http-equiv', 'Content-Language'),
+ ('content', self.lang)]
)
mcharset = new_tag(
- soup, 'meta', [("http-equiv", "Content-Type"),
- ("content", "text/html; charset=utf-8")]
+ soup, 'meta', [('http-equiv', 'Content-Type'),
+ ('content', 'text/html; charset=utf-8')]
)
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
diff --git a/recipes/toi.recipe b/recipes/toi.recipe
index ebe7c52712..023bcf2f8b 100644
--- a/recipes/toi.recipe
+++ b/recipes/toi.recipe
@@ -29,7 +29,7 @@ class TheEconomicTimes(BasicNewsRecipe):
language = 'en_IN'
publication_type = 'newspaper'
masthead_url = 'http://timesofindia.indiatimes.com/photo.cms?msid=2419189'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif}
.foto_mg{font-size: 60%;
font-weight: 700;}
@@ -37,7 +37,7 @@ class TheEconomicTimes(BasicNewsRecipe):
artdate{font-size: 60%}
artag{font-size: 60%}
div.storycontent{padding-top: 10px}
- """
+ '''
conversion_options = {'comment': description,
'tags': category,
'publisher': publisher,
diff --git a/recipes/toiprint.recipe b/recipes/toiprint.recipe
index 3bc662766d..8fa9627f67 100644
--- a/recipes/toiprint.recipe
+++ b/recipes/toiprint.recipe
@@ -97,7 +97,7 @@ class toiprint(BasicNewsRecipe):
else:
desc = 'Page No.' + url.split('_')[-3] + ' | ' + art.get('ColumnTitle', '')
self.log('\t', title, '\n\t', desc.replace('\n', ''))
- feeds_dict[section].append({"title": title, "url": url, "description": desc})
+ feeds_dict[section].append({'title': title, 'url': url, 'description': desc})
def sort_key(x):
section = x[0]
try:
diff --git a/recipes/tyzden.recipe b/recipes/tyzden.recipe
index 838ddf6965..a2623db7dc 100644
--- a/recipes/tyzden.recipe
+++ b/recipes/tyzden.recipe
@@ -50,7 +50,7 @@ class Tyzden(BasicNewsRecipe):
'attrs': {
'class': re.compile(r'\barticle\b')}}, ]
- extra_css = """.theme-highlight {
+ extra_css = '''.theme-highlight {
color: #999;
text-decoration: none;
}
@@ -90,7 +90,7 @@ class Tyzden(BasicNewsRecipe):
.teaser__title .highlight {
color: #000;
}
- """
+ '''
def get_browser(self):
br = BasicNewsRecipe.get_browser(self)
diff --git a/recipes/ugeskriftet.recipe b/recipes/ugeskriftet.recipe
index d91e5bf0c6..6a58804098 100644
--- a/recipes/ugeskriftet.recipe
+++ b/recipes/ugeskriftet.recipe
@@ -23,13 +23,13 @@ class Ugeskriftet(BasicNewsRecipe):
]})
]
remove_tags = [dict(name='img')]
- extra_css = """
+ extra_css = '''
h1{font-weight: bold; font-size: large;}
b{font-weight: bold; font-size: medium;}
h2{font-weight: bold; font-size: large;}
h3{font-weight: bold; font-size: large;}
h4{font-weight: bold; font-size: large;}
- """
+ '''
feeds = [
('Ugeskriftet for læger', 'https://ugeskriftet.dk/rss/forside'),
diff --git a/recipes/uncrate.recipe b/recipes/uncrate.recipe
index 86056a2ec8..549d5f6af3 100644
--- a/recipes/uncrate.recipe
+++ b/recipes/uncrate.recipe
@@ -55,9 +55,9 @@ class Uncrate(BasicNewsRecipe):
def preprocess_html(self, soup):
mlang = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Language"), ("content", self.lang)])
+ ('http-equiv', 'Content-Language'), ('content', self.lang)])
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=utf-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=utf-8')])
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
for item in soup.findAll(style=True):
diff --git a/recipes/unian_net_en.recipe b/recipes/unian_net_en.recipe
index 55d9a43f64..9fe8869f19 100644
--- a/recipes/unian_net_en.recipe
+++ b/recipes/unian_net_en.recipe
@@ -7,7 +7,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class Unian(BasicNewsRecipe):
title = 'UNIAN'
description = ('UNIAN (Ukrainian Independent News Agency of News) is the largest independent news agency,'
- ' first in Ukraine, founded in 1993, remaining the leader among the country\'s news media,'
+ " first in Ukraine, founded in 1993, remaining the leader among the country's news media,"
' being the most cited source of news from across Ukraine.')
__author__ = 'bugmen00t'
publication_type = 'newspaper'
diff --git a/recipes/vancouver_province.recipe b/recipes/vancouver_province.recipe
index 838be31c73..f6281b62e6 100644
--- a/recipes/vancouver_province.recipe
+++ b/recipes/vancouver_province.recipe
@@ -164,7 +164,7 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
@@ -183,18 +183,18 @@ class CanWestPaper(BasicNewsRecipe):
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -275,10 +275,10 @@ class CanWestPaper(BasicNewsRecipe):
if url.startswith('/'):
url = self.url_prefix + url
if not url.startswith(self.url_prefix):
- print("Rejected " + url)
+ print('Rejected ' + url)
return
if url in self.url_list:
- print("Rejected dup " + url)
+ print('Rejected dup ' + url)
return
self.url_list.append(url)
title = self.tag_to_string(atag, False)
@@ -290,8 +290,8 @@ class CanWestPaper(BasicNewsRecipe):
return
dtag = adiv.find('div', 'content')
description = ''
- print("URL " + url)
- print("TITLE " + title)
+ print('URL ' + url)
+ print('TITLE ' + title)
if dtag is not None:
stag = dtag.span
if stag is not None:
@@ -299,18 +299,18 @@ class CanWestPaper(BasicNewsRecipe):
description = self.tag_to_string(stag, False)
else:
description = self.tag_to_string(dtag, False)
- print("DESCRIPTION: " + description)
+ print('DESCRIPTION: ' + description)
if key not in articles:
articles[key] = []
articles[key].append(dict(
title=title, url=url, date='', description=description, author='', content=''))
def parse_web_index(key, keyurl):
- print("Section: " + key + ': ' + self.url_prefix + keyurl)
+ print('Section: ' + key + ': ' + self.url_prefix + keyurl)
try:
soup = self.index_to_soup(self.url_prefix + keyurl)
except:
- print("Section: " + key + ' NOT FOUND')
+ print('Section: ' + key + ' NOT FOUND')
return
ans.append(key)
mainsoup = soup.find('div', 'bodywrapper')
diff --git a/recipes/vancouver_sun.recipe b/recipes/vancouver_sun.recipe
index 4c84e58c0f..5326e44731 100644
--- a/recipes/vancouver_sun.recipe
+++ b/recipes/vancouver_sun.recipe
@@ -165,24 +165,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -263,10 +263,10 @@ class CanWestPaper(BasicNewsRecipe):
if url.startswith('/'):
url = self.url_prefix + url
if not url.startswith(self.url_prefix):
- print("Rejected " + url)
+ print('Rejected ' + url)
return
if url in self.url_list:
- print("Rejected dup " + url)
+ print('Rejected dup ' + url)
return
self.url_list.append(url)
title = self.tag_to_string(atag, False)
@@ -278,8 +278,8 @@ class CanWestPaper(BasicNewsRecipe):
return
dtag = adiv.find('div', 'content')
description = ''
- print("URL " + url)
- print("TITLE " + title)
+ print('URL ' + url)
+ print('TITLE ' + title)
if dtag is not None:
stag = dtag.span
if stag is not None:
@@ -287,18 +287,18 @@ class CanWestPaper(BasicNewsRecipe):
description = self.tag_to_string(stag, False)
else:
description = self.tag_to_string(dtag, False)
- print("DESCRIPTION: " + description)
+ print('DESCRIPTION: ' + description)
if key not in articles:
articles[key] = []
articles[key].append(dict(
title=title, url=url, date='', description=description, author='', content=''))
def parse_web_index(key, keyurl):
- print("Section: " + key + ': ' + self.url_prefix + keyurl)
+ print('Section: ' + key + ': ' + self.url_prefix + keyurl)
try:
soup = self.index_to_soup(self.url_prefix + keyurl)
except:
- print("Section: " + key + ' NOT FOUND')
+ print('Section: ' + key + ' NOT FOUND')
return
ans.append(key)
mainsoup = soup.find('div', 'bodywrapper')
diff --git a/recipes/variety.recipe b/recipes/variety.recipe
index 26a1a12b1c..c74bd5d8f5 100644
--- a/recipes/variety.recipe
+++ b/recipes/variety.recipe
@@ -20,13 +20,13 @@ class Variety(BasicNewsRecipe):
category = 'Entertainment Industry News, Daily Variety, Movie Reviews, TV, Awards, Oscars, Cannes, Box Office, Hollywood'
language = 'en'
masthead_url = 'http://images1.variety.com/graphics/variety/Variety_logo_green_tm.gif'
- extra_css = """
+ extra_css = '''
body{font-family: Arial,Helvetica,sans-serif; font-size: 1.275em}
.date{font-size: small; border: 1px dotted rgb(204, 204, 204); font-style: italic; color: rgb(102, 102, 102); margin: 5px 0px; padding: 0.5em;}
.author{margin: 5px 0px 5px 20px; padding: 0.5em; background: none repeat scroll 0% 0% rgb(247, 247, 247);}
.art h2{color: rgb(153, 0, 0); font-size: 1.275em; font-weight: bold;}
img{margin-bottom: 1em}
- """
+ '''
conversion_options = {
'comments': description, 'tags': category, 'language': language, 'publisher': publisher
diff --git a/recipes/vecernji_list.recipe b/recipes/vecernji_list.recipe
index 5c74db613c..5bcd540f18 100644
--- a/recipes/vecernji_list.recipe
+++ b/recipes/vecernji_list.recipe
@@ -23,7 +23,7 @@ def new_tag(soup, name, attrs=()):
class VecernjiList(BasicNewsRecipe):
title = 'Vecernji List'
__author__ = 'Darko Miletic'
- description = "Vecernji.hr je vodeci hrvatski news portal. Cilj je biti prvi u objavljivanju svih vijesti iz Hrvatske, svijeta, sporta, gospodarstva, showbiza i jos mnogo vise. Uz cjelodnevni rad, novinari objavljuju preko 300 raznih vijesti svakoga dana. Vecernji.hr prati sve vaznije dogadaje specijalnim izvjestajima, video specijalima i foto galerijama." # noqa: E501
+ description = 'Vecernji.hr je vodeci hrvatski news portal. Cilj je biti prvi u objavljivanju svih vijesti iz Hrvatske, svijeta, sporta, gospodarstva, showbiza i jos mnogo vise. Uz cjelodnevni rad, novinari objavljuju preko 300 raznih vijesti svakoga dana. Vecernji.hr prati sve vaznije dogadaje specijalnim izvjestajima, video specijalima i foto galerijama.' # noqa: E501
publisher = 'Vecernji.hr'
category = 'news, politics, Croatia'
oldest_article = 2
@@ -57,9 +57,9 @@ class VecernjiList(BasicNewsRecipe):
soup.html['dir'] = self.direction
mlang = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Language"), ("content", self.lang)])
+ ('http-equiv', 'Content-Language'), ('content', self.lang)])
mcharset = new_tag(soup, 'meta', [
- ("http-equiv", "Content-Type"), ("content", "text/html; charset=UTF-8")])
+ ('http-equiv', 'Content-Type'), ('content', 'text/html; charset=UTF-8')])
soup.head.insert(0, mlang)
soup.head.insert(1, mcharset)
return self.adeify_images(soup)
diff --git a/recipes/vic_times.recipe b/recipes/vic_times.recipe
index 3e11e31e39..034724e7b6 100644
--- a/recipes/vic_times.recipe
+++ b/recipes/vic_times.recipe
@@ -100,7 +100,7 @@ class TimesColonist(BasicNewsRecipe):
dict(name='div', attrs={
'class': re.compile('window')}),
dict(name='div', attrs={'class': re.compile('related.news.element')})]
- print("PROFILE NAME = " + options.output_profile.short_name)
+ print('PROFILE NAME = ' + options.output_profile.short_name)
if self.kindle_omit_images and options.output_profile.short_name in ['kindle', 'kindle_dx', 'kindle_pw']:
self.remove_tags.append(
dict(name='div', attrs={'class': re.compile('image-container')}))
@@ -127,24 +127,24 @@ class TimesColonist(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -229,11 +229,11 @@ class TimesColonist(BasicNewsRecipe):
description = self.tag_to_string(dtag, False)
article_list.append(dict(
title=title, url=url, date='', description=description, author='', content=''))
- print(sectitle + title + ": description = " +
- description + " URL=" + url + '\n\r')
+ print(sectitle + title + ': description = ' +
+ description + ' URL=' + url + '\n\r')
def add_section_index(self, ans, securl, sectitle):
- print("Add section url=" + self.url_prefix + '/' + securl + '\n\r')
+ print('Add section url=' + self.url_prefix + '/' + securl + '\n\r')
try:
soup = self.index_to_soup(self.url_prefix + '/' + securl)
except:
diff --git a/recipes/villagevoice.recipe b/recipes/villagevoice.recipe
index 7f300a94e9..69a9ab1809 100644
--- a/recipes/villagevoice.recipe
+++ b/recipes/villagevoice.recipe
@@ -9,10 +9,10 @@ class VillageVoice(BasicNewsRecipe):
title = 'Village Voice'
feeds = [
- ("Complete Issue", "http://villagevoice.com/syndication/issue"),
- ("News", "http://villagevoice.com/syndication/section/news"),
- ("Music", "http://villagevoice.com/syndication/section/music"),
- ("Movies", "http://villagevoice.com/syndication/section/film"),
+ ('Complete Issue', 'http://villagevoice.com/syndication/issue'),
+ ('News', 'http://villagevoice.com/syndication/section/news'),
+ ('Music', 'http://villagevoice.com/syndication/section/music'),
+ ('Movies', 'http://villagevoice.com/syndication/section/film'),
# ("Restaurants", "http://villagevoice.com/syndication/section/dining"),
# ("Music Events", "http://villagevoice.com/syndication/events?type=music"),
# ("Calendar Events", "http://villagevoice.com/syndication/events"),
@@ -22,7 +22,7 @@ class VillageVoice(BasicNewsRecipe):
auto_cleanup = True
max_articles_per_feed = 50
- masthead_url = "http://assets.villagevoice.com/img/citylogo.png"
+ masthead_url = 'http://assets.villagevoice.com/img/citylogo.png'
language = 'en'
__author__ = 'Barty'
diff --git a/recipes/volksrant.recipe b/recipes/volksrant.recipe
index 6fe050a769..c6cd3d6779 100644
--- a/recipes/volksrant.recipe
+++ b/recipes/volksrant.recipe
@@ -34,7 +34,7 @@ class Volkskrant(BasicNewsRecipe):
dict(attrs={'data-element-id': ['article-element-authors']}),
dict(name=['script', 'noscript', 'style']),
]
- remove_attributes = ["class", "id", "name", "style"]
+ remove_attributes = ['class', 'id', 'name', 'style']
encoding = 'utf-8'
no_stylesheets = True
ignore_duplicate_articles = {'url'}
@@ -69,7 +69,7 @@ class Volkskrant(BasicNewsRecipe):
'span', attrs={'class': 'teaser__title__value--short'}
)
).strip()
- if teaser_label.lower() == "podcast":
+ if teaser_label.lower() == 'podcast':
continue
parts = []
if teaser_label:
@@ -101,10 +101,10 @@ class Volkskrant(BasicNewsRecipe):
tag['src'] = 'https://www.volkskrant.nl' + tag['src']
for tag in soup():
- if tag.name == "picture":
- tag.replaceWith(tag.find("img"))
+ if tag.name == 'picture':
+ tag.replaceWith(tag.find('img'))
- comic_articles = { "Bas van der Schot", "Poldermodellen", "Gummbah", "Sigmund" }
+ comic_articles = { 'Bas van der Schot', 'Poldermodellen', 'Gummbah', 'Sigmund' }
if self.tag_to_string(soup.find('h1')).strip() in comic_articles:
for node in soup.find('figure').find_next_siblings():
node.extract()
@@ -116,8 +116,8 @@ class Volkskrant(BasicNewsRecipe):
'Accept': 'application/json, text/javascript, */*; q=0.01',
'DNT': '1',
}
- url = "https://login-api.e-pages.dk/v1/krant.volkskrant.nl/folders"
+ url = 'https://login-api.e-pages.dk/v1/krant.volkskrant.nl/folders'
with closing(self.browser.open(Request(url, None, headers))) as r:
folders = json.loads(r.read())
- return folders["objects"][0]["teaser_medium"]
+ return folders['objects'][0]['teaser_medium']
return None
diff --git a/recipes/vreme.recipe b/recipes/vreme.recipe
index f8b88e67eb..fc83944a97 100644
--- a/recipes/vreme.recipe
+++ b/recipes/vreme.recipe
@@ -26,7 +26,7 @@ class Vreme(BasicNewsRecipe):
language = 'sr'
publication_type = 'magazine'
masthead_url = 'http://www.vreme.com/g/vreme-logo.gif'
- extra_css = """
+ extra_css = '''
@font-face {font-family: "serif1";src:url(res:///opt/sony/ebook/FONT/tt0011m_.ttf)}
@font-face {font-family: "sans1";src:url(res:///opt/sony/ebook/FONT/tt0003m_.ttf)}
body{font-family: serif1, serif}
@@ -35,7 +35,7 @@ class Vreme(BasicNewsRecipe):
.column-heading1{font-family: sans1, sans-serif; font-size: x-large}
.column-normal{font-family: sans1, sans-serif; font-size: medium}
.large{font-family: sans1, sans-serif; font-size: large}
- """
+ '''
conversion_options = {
'comment': description, 'tags': category, 'publisher': publisher, 'language': language, 'linearize_tables': True
diff --git a/recipes/windows_star.recipe b/recipes/windows_star.recipe
index 07158419d8..89fbc5347c 100644
--- a/recipes/windows_star.recipe
+++ b/recipes/windows_star.recipe
@@ -69,14 +69,14 @@ class CanWestPaper(BasicNewsRecipe):
ans = ['News']
# Find each instance of class="sectiontitle", class="featurecontent"
- for divtag in soup.findAll('div', attrs={'class': ["section_title02", "featurecontent"]}):
+ for divtag in soup.findAll('div', attrs={'class': ['section_title02', 'featurecontent']}):
if 'section_title' in ''.join(divtag['class']):
# div contains section title
if not divtag.h3:
continue
key = self.tag_to_string(divtag.h3, False)
ans.append(key)
- self.log("Section name %s" % key)
+ self.log('Section name %s' % key)
continue
# div contains article data
h1tag = divtag.find('h1')
diff --git a/recipes/windsor_star.recipe b/recipes/windsor_star.recipe
index 763f8cfa4f..c28eebe4f6 100644
--- a/recipes/windsor_star.recipe
+++ b/recipes/windsor_star.recipe
@@ -123,24 +123,24 @@ class CanWestPaper(BasicNewsRecipe):
continue
break
if daysback == 7:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
cover = None
return cover
def fixChars(self, string):
# Replace lsquo (\x91)
- fixed = re.sub("\x91", "‘", string)
+ fixed = re.sub('\x91', '‘', string)
# Replace rsquo (\x92)
- fixed = re.sub("\x92", "’", fixed)
+ fixed = re.sub('\x92', '’', fixed)
# Replace ldquo (\x93)
- fixed = re.sub("\x93", "“", fixed)
+ fixed = re.sub('\x93', '“', fixed)
# Replace rdquo (\x94)
- fixed = re.sub("\x94", "”", fixed)
+ fixed = re.sub('\x94', '”', fixed)
# Replace ndash (\x96)
- fixed = re.sub("\x96", "–", fixed)
+ fixed = re.sub('\x96', '–', fixed)
# Replace mdash (\x97)
- fixed = re.sub("\x97", "—", fixed)
- fixed = re.sub("’", "’", fixed)
+ fixed = re.sub('\x97', '—', fixed)
+ fixed = re.sub('’', '’', fixed)
return fixed
def massageNCXText(self, description):
@@ -180,7 +180,7 @@ class CanWestPaper(BasicNewsRecipe):
ans = ['News']
# Find each instance of class="sectiontitle", class="featurecontent"
- for divtag in soup.findAll('div', attrs={'class': ["section_title02", "featurecontent"]}):
+ for divtag in soup.findAll('div', attrs={'class': ['section_title02', 'featurecontent']}):
# self.log(" div class = %s" % divtag['class'])
if ''.join(divtag['class']).startswith('section_title'):
# div contains section title
@@ -188,7 +188,7 @@ class CanWestPaper(BasicNewsRecipe):
continue
key = self.tag_to_string(divtag.h3, False)
ans.append(key)
- self.log("Section name %s" % key)
+ self.log('Section name %s' % key)
continue
# div contains article data
h1tag = divtag.find('h1')
diff --git a/recipes/wired.recipe b/recipes/wired.recipe
index cc1d5d021a..86e64b7929 100644
--- a/recipes/wired.recipe
+++ b/recipes/wired.recipe
@@ -32,14 +32,14 @@ class WiredDailyNews(BasicNewsRecipe):
language = 'en'
ignore_duplicate_articles = {'url'}
remove_empty_feeds = True
- extra_css = """
+ extra_css = '''
.entry-header{
text-transform: uppercase;
vertical-align: baseline;
display: inline;
}
ul li{display: inline}
- """
+ '''
keep_only_tags = [
classes('content-header lead-asset article__body'),
@@ -57,7 +57,7 @@ class WiredDailyNews(BasicNewsRecipe):
self.log('Parsing index page', currenturl)
soup = self.index_to_soup(currenturl)
baseurl = 'https://www.wired.com'
- for a in soup.find("div", {"class" : 'multi-packages'}).findAll('a', href=True):
+ for a in soup.find('div', {'class' : 'multi-packages'}).findAll('a', href=True):
url = a['href']
if url.startswith('/story') and url.endswith('/'):
title = self.tag_to_string(a.parent.find('h3'))
diff --git a/recipes/wired_daily.recipe b/recipes/wired_daily.recipe
index c8e1918423..f73c682219 100644
--- a/recipes/wired_daily.recipe
+++ b/recipes/wired_daily.recipe
@@ -37,14 +37,14 @@ class WiredDailyNews(BasicNewsRecipe):
ignore_duplicate_articles = {'url'}
remove_empty_feeds = True
publication_type = 'newsportal'
- extra_css = """
+ extra_css = '''
.entry-header{
text-transform: uppercase;
vertical-align: baseline;
display: inline;
}
ul li{display: inline}
- """
+ '''
recipe_specific_options = {
'days': {
diff --git a/recipes/words_without_borders.recipe b/recipes/words_without_borders.recipe
index 136ed8f14c..cc3a9b7c74 100644
--- a/recipes/words_without_borders.recipe
+++ b/recipes/words_without_borders.recipe
@@ -19,7 +19,7 @@ class AdvancedUserRecipe1308302002(BasicNewsRecipe):
remove_tags_after = [
{'class': 'addthis_toolbox addthis_default_style no_print'}]
remove_tags = [{'class': ['posterous_quote_citation', 'button']}]
- extra_css = """
- h1{font-family: Georgia,serif; font-size: large}h2{font-family: Georgia,serif; font-size: large} """
+ extra_css = '''
+ h1{font-family: Georgia,serif; font-size: large}h2{font-family: Georgia,serif; font-size: large} '''
feeds = [(u'wwb', u'http://feeds.feedburner.com/wwborders?format=xml')]
diff --git a/recipes/wsj.recipe b/recipes/wsj.recipe
index a588271ce0..076925551c 100644
--- a/recipes/wsj.recipe
+++ b/recipes/wsj.recipe
@@ -14,7 +14,7 @@ class WSJ(BasicNewsRecipe):
__author__ = 'unkn0wn'
description = (
'The Print Edition of WSJ. The Wall Street Journal is your source for breaking news, analysis and insights from the U.S. and '
- 'around the world, the world\'s leading business and finance publication.'
+ "around the world, the world's leading business and finance publication."
)
language = 'en_US'
masthead_url = 'https://s.wsj.net/media/wsj_amp_masthead_lg.png'
diff --git a/recipes/wsj_free.recipe b/recipes/wsj_free.recipe
index d818c5ec7b..7ba539a6d9 100644
--- a/recipes/wsj_free.recipe
+++ b/recipes/wsj_free.recipe
@@ -180,7 +180,7 @@ class WSJ(BasicNewsRecipe):
# you can get the version below from lib-min.js
# search for: "\d+\.\d+\.\d+"
# This might need to be updated in the future
- auth0_client = json.dumps({"name": "auth0.js-ulp", "version": "9.11.3"})
+ auth0_client = json.dumps({'name': 'auth0.js-ulp', 'version': '9.11.3'})
if not isinstance(auth0_client, bytes):
auth0_client = auth0_client.encode('utf-8')
auth0_client = standard_b64encode(auth0_client)
@@ -260,7 +260,7 @@ class WSJ(BasicNewsRecipe):
articles.append({'title': title, 'url': url,
'description': desc, 'date': ''})
self.log('\tFound article:', title)
- self.log('\t\t', desc + " " + url)
+ self.log('\t\t', desc + ' ' + url)
if self.test and len(articles) >= self.test[1]:
break
diff --git a/recipes/wsj_news.recipe b/recipes/wsj_news.recipe
index 4d0c53a9fb..47777dcb7e 100644
--- a/recipes/wsj_news.recipe
+++ b/recipes/wsj_news.recipe
@@ -14,7 +14,7 @@ class WSJ(BasicNewsRecipe):
__author__ = 'unkn0wn'
description = (
'The Wall Street Journal is your source for breaking news, analysis and insights from the U.S. and '
- 'around the world, the world\'s leading business and finance publication. Get the Latest News here.'
+ "around the world, the world's leading business and finance publication. Get the Latest News here."
)
language = 'en_US'
masthead_url = 'https://s.wsj.net/media/wsj_amp_masthead_lg.png'
diff --git a/recipes/yomiuri_world.recipe b/recipes/yomiuri_world.recipe
index f7babc563a..144c77f1e5 100644
--- a/recipes/yomiuri_world.recipe
+++ b/recipes/yomiuri_world.recipe
@@ -21,9 +21,9 @@ class YOLNews(BasicNewsRecipe):
encoding = 'UTF-8'
index = 'http://www.yomiuri.co.jp/world/'
remove_javascript = True
- masthead_title = u"YOMIURI ONLINE"
+ masthead_title = u'YOMIURI ONLINE'
- keep_only_tags = [{'class': "article text-resizeable"}]
+ keep_only_tags = [{'class': 'article text-resizeable'}]
def parse_feeds(self):
feeds = BasicNewsRecipe.parse_feeds(self)
diff --git a/recipes/zaobao.recipe b/recipes/zaobao.recipe
index 5618a99fd8..83f1c855ca 100644
--- a/recipes/zaobao.recipe
+++ b/recipes/zaobao.recipe
@@ -172,7 +172,7 @@ class ZAOBAO(BasicNewsRecipe):
article.text_summary = article.text_summary.encode(
'cp1252', 'replace').decode(self.encoding, 'replace')
- if article.title == "Untitled article":
+ if article.title == 'Untitled article':
self.log(_('Removing empty article %s from %s') %
(article.title, article.url))
# remove the article
diff --git a/recipes/zdnet.fr.recipe b/recipes/zdnet.fr.recipe
index 585f040344..00c83323e4 100644
--- a/recipes/zdnet.fr.recipe
+++ b/recipes/zdnet.fr.recipe
@@ -62,6 +62,6 @@ class zdnet(BasicNewsRecipe):
try:
br.open(masthead)
except:
- self.log("\nCover unavailable")
+ self.log('\nCover unavailable')
masthead = None
return masthead
diff --git a/recipes/zeitde_sub.recipe b/recipes/zeitde_sub.recipe
index 5ec52cdc9d..a255354257 100644
--- a/recipes/zeitde_sub.recipe
+++ b/recipes/zeitde_sub.recipe
@@ -7,9 +7,9 @@ __copyright__ = '2010, Steffen Siebert '
__docformat__ = 'restructuredtext de'
__version__ = '1.5'
-"""
+'''
Die Zeit EPUB
-"""
+'''
import io
import os
@@ -184,7 +184,7 @@ class ZeitEPUBAbo(BasicNewsRecipe):
]
def build_index(self):
- url = "https://meine.zeit.de/anmelden?url=https%3A//epaper.zeit.de/abo/diezeit"
+ url = 'https://meine.zeit.de/anmelden?url=https%3A//epaper.zeit.de/abo/diezeit'
browser = self.get_browser()
# new login process
@@ -241,7 +241,7 @@ class ZeitEPUBAbo(BasicNewsRecipe):
self.report_progress(
0, _('trying to download cover image (titlepage)'))
self.download_cover()
- self.conversion_options["cover"] = self.cover_path
+ self.conversion_options['cover'] = self.cover_path
return index
@@ -250,7 +250,7 @@ class ZeitEPUBAbo(BasicNewsRecipe):
self.log.warning('Downloading cover')
try:
self.log.warning('Trying PDF-based cover')
- url = "https://meine.zeit.de/anmelden?url=https%3A//epaper.zeit.de/abo/diezeit"
+ url = 'https://meine.zeit.de/anmelden?url=https%3A//epaper.zeit.de/abo/diezeit'
browser = self.get_browser()
# new login process
diff --git a/recipes/zerodeux.recipe b/recipes/zerodeux.recipe
index 1fc7dda613..45e25cbe8d 100644
--- a/recipes/zerodeux.recipe
+++ b/recipes/zerodeux.recipe
@@ -10,7 +10,7 @@ from calibre.web.feeds.news import BasicNewsRecipe
class ZeroDeuxRecipe(BasicNewsRecipe):
title = 'Zérodeux'
__author__ = 'Kabonix'
- description = 'Revue d\'art contemporain trimestrielle'
+ description = "Revue d'art contemporain trimestrielle"
publisher = 'Zérodeux'
category = 'art, contemporary art, criticism'
language = 'fr'
diff --git a/recipes/zycie_warszawy.recipe b/recipes/zycie_warszawy.recipe
index 60052d9827..07b7e1c85e 100644
--- a/recipes/zycie_warszawy.recipe
+++ b/recipes/zycie_warszawy.recipe
@@ -47,5 +47,5 @@ class zyciewarszawy(BasicNewsRecipe):
preprocess_regexps = [(re.compile(r',3.jpg'), lambda m: ',2.jpg')]
def print_version(self, url):
- url += "?print=tak"
+ url += '?print=tak'
return url
diff --git a/resources/default_tweaks.py b/resources/default_tweaks.py
index b25f68f19a..546916e809 100644
--- a/resources/default_tweaks.py
+++ b/resources/default_tweaks.py
@@ -427,7 +427,7 @@ maximum_cover_size = (1650, 2200)
# control where it is sent. Valid values are "main", "carda", "cardb". Note
# that if there isn't enough free space available on the location you choose,
# the files will be sent to the location with the most free space.
-send_news_to_device_location = "main"
+send_news_to_device_location = 'main'
#: Unified toolbar on macOS
# If you enable this option and restart calibre, the toolbar will be 'unified'
diff --git a/ruff-strict-pep8.toml b/ruff-strict-pep8.toml
index 9155df0968..986c49a613 100644
--- a/ruff-strict-pep8.toml
+++ b/ruff-strict-pep8.toml
@@ -18,7 +18,7 @@ quote-style = 'single'
[lint]
ignore = ['E402', 'E722', 'E741']
-select = ['E', 'F', 'I', 'W', 'INT']
+select = ['E', 'F', 'I', 'W', 'INT', 'Q']
[lint.per-file-ignores]
"src/calibre/ebooks/unihandecode/*codepoints.py" = ['E501']
@@ -36,3 +36,9 @@ section-order = ['__python__', "future", "standard-library", "third-party", "fir
[lint.isort.sections]
'__python__' = ['__python__']
+
+[lint.flake8-quotes]
+avoid-escape = true
+docstring-quotes = 'single'
+inline-quotes = 'single'
+multiline-quotes = 'single'
diff --git a/setup/build.py b/setup/build.py
index 3e04d8ae5a..d5599db17c 100644
--- a/setup/build.py
+++ b/setup/build.py
@@ -61,7 +61,7 @@ class Extension:
self.headers = d['headers'] = absolutize(kwargs.get('headers', []))
self.sip_files = d['sip_files'] = absolutize(kwargs.get('sip_files', []))
self.needs_exceptions = d['needs_exceptions'] = kwargs.get('needs_exceptions', False)
- self.qt_modules = d['qt_modules'] = kwargs.get('qt_modules', ["widgets"])
+ self.qt_modules = d['qt_modules'] = kwargs.get('qt_modules', ['widgets'])
self.inc_dirs = d['inc_dirs'] = absolutize(kwargs.get('inc_dirs', []))
self.lib_dirs = d['lib_dirs'] = absolutize(kwargs.get('lib_dirs', []))
self.extra_objs = d['extra_objs'] = absolutize(kwargs.get('extra_objs', []))
@@ -121,7 +121,7 @@ def is_ext_allowed(cross_compile_for: str, ext: Extension) -> bool:
if islinux and only == cross_compile_for:
return True
only = set(only.split())
- q = set(filter(lambda x: globals()["is" + x], ["bsd", "freebsd", "haiku", "linux", "macos", "windows"]))
+ q = set(filter(lambda x: globals()['is' + x], ['bsd', 'freebsd', 'haiku', 'linux', 'macos', 'windows']))
return len(q.intersection(only)) > 0
return True
@@ -541,8 +541,8 @@ class Build(Command):
'-DCALIBRE_MODINIT_FUNC='
'{} __attribute__ ((visibility ("default"))) {}'.format(extern_decl, return_type)]
if ext.needs_cxx and ext.needs_cxx_std:
- if env.cc_output_flag.startswith('/') and ext.needs_cxx == "11":
- ext.needs_cxx = "14"
+ if env.cc_output_flag.startswith('/') and ext.needs_cxx == '11':
+ ext.needs_cxx = '14'
cflags.append(env.std_prefix + 'c++' + ext.needs_cxx_std)
if ext.needs_c_std and not env.std_prefix.startswith('/'):
@@ -618,7 +618,7 @@ class Build(Command):
subprocess.check_call(*args, **kwargs)
except:
cmdline = ' '.join(['"%s"' % (arg) if ' ' in arg else arg for arg in args[0]])
- print("Error while executing: %s\n" % (cmdline))
+ print('Error while executing: %s\n' % (cmdline))
raise
def build_headless(self):
diff --git a/setup/hosting.py b/setup/hosting.py
index 6d54cf67dc..2f270ad111 100644
--- a/setup/hosting.py
+++ b/setup/hosting.py
@@ -224,7 +224,7 @@ class GitHub(Base): # {{{
def fail(self, r, msg):
print(msg, ' Status Code: %s' % r.status_code, file=sys.stderr)
- print("JSON from response:", file=sys.stderr)
+ print('JSON from response:', file=sys.stderr)
pprint(dict(r.json()), stream=sys.stderr)
raise SystemExit(1)
diff --git a/setup/install.py b/setup/install.py
index 38930b5595..0d6c8d48e3 100644
--- a/setup/install.py
+++ b/setup/install.py
@@ -68,7 +68,7 @@ class Develop(Command):
dest='fatal_errors', help='If set die on post install errors.')
parser.add_option('--no-postinstall', action='store_false',
dest='postinstall', default=True,
- help='Don\'t run post install actions like creating MAN pages, setting'+
+ help="Don't run post install actions like creating MAN pages, setting"+
' up desktop integration and so on')
def add_options(self, parser):
diff --git a/setup/installers.py b/setup/installers.py
index 2a2de8809d..a45f689701 100644
--- a/setup/installers.py
+++ b/setup/installers.py
@@ -253,7 +253,7 @@ class ExtDev(Command):
ext_dir = build_only(which, '', ext)
src = os.path.join(ext_dir, f'{ext}.so')
print(
- "\n\n\x1b[33;1mWARNING: This does not work on macOS, unless you use un-signed builds with ",
+ '\n\n\x1b[33;1mWARNING: This does not work on macOS, unless you use un-signed builds with ',
' ./update-on-ox develop\x1b[m',
file=sys.stderr, end='\n\n\n')
host = 'ox'
diff --git a/setup/plugins_mirror.py b/setup/plugins_mirror.py
index 325b4d7dea..9c6e8adb48 100644
--- a/setup/plugins_mirror.py
+++ b/setup/plugins_mirror.py
@@ -62,7 +62,7 @@ socket.setdefaulttimeout(30)
def read(url, get_info=False): # {{{
- if url.startswith("file://"):
+ if url.startswith('file://'):
return urlopen(url).read()
opener = build_opener()
opener.addheaders = [
diff --git a/setup/publish.py b/setup/publish.py
index 968860a8f5..1a8cccf29d 100644
--- a/setup/publish.py
+++ b/setup/publish.py
@@ -237,13 +237,13 @@ class Manual(Command):
from polyglot.http_server import HTTPServer, SimpleHTTPRequestHandler
HandlerClass = SimpleHTTPRequestHandler
ServerClass = HTTPServer
- Protocol = "HTTP/1.0"
+ Protocol = 'HTTP/1.0'
server_address = ('127.0.0.1', 8000)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
- print("Serving User Manual on localhost:8000")
+ print('Serving User Manual on localhost:8000')
from calibre.gui2 import open_url
open_url('http://localhost:8000')
httpd.serve_forever()
diff --git a/setup/upload.py b/setup/upload.py
index 7c0076d339..dbb5c56209 100644
--- a/setup/upload.py
+++ b/setup/upload.py
@@ -28,8 +28,8 @@ if __name__ == '__main__':
from setup import Command, __appname__, __version__, installer_names
DOWNLOADS = '/srv/main/downloads'
-HTML2LRF = "calibre/ebooks/lrf/html/demo"
-TXT2LRF = "src/calibre/ebooks/lrf/txt/demo"
+HTML2LRF = 'calibre/ebooks/lrf/html/demo'
+TXT2LRF = 'src/calibre/ebooks/lrf/txt/demo'
STAGING_HOST = 'download.calibre-ebook.com'
BACKUP_HOST = 'code.calibre-ebook.com'
STAGING_USER = BACKUP_USER = 'root'
diff --git a/setup/vcvars.py b/setup/vcvars.py
index 2461e4fc9c..99cfb16d41 100644
--- a/setup/vcvars.py
+++ b/setup/vcvars.py
@@ -26,8 +26,8 @@ def get_program_files_location(which=CSIDL_PROGRAM_FILESX86):
def find_vswhere():
for which in (CSIDL_PROGRAM_FILESX86, CSIDL_PROGRAM_FILES):
root = get_program_files_location(which)
- vswhere = os.path.join(root, "Microsoft Visual Studio", "Installer",
- "vswhere.exe")
+ vswhere = os.path.join(root, 'Microsoft Visual Studio', 'Installer',
+ 'vswhere.exe')
if os.path.exists(vswhere):
return vswhere
raise SystemExit('Could not find vswhere.exe')
@@ -41,24 +41,24 @@ def get_output(*cmd):
def find_visual_studio():
path = get_output(
find_vswhere(),
- "-latest",
- "-requires",
- "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
- "-property",
- "installationPath",
- "-products",
- "*"
+ '-latest',
+ '-requires',
+ 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64',
+ '-property',
+ 'installationPath',
+ '-products',
+ '*'
).strip()
- return os.path.join(path, "VC", "Auxiliary", "Build")
+ return os.path.join(path, 'VC', 'Auxiliary', 'Build')
@lru_cache
def find_msbuild():
base_path = get_output(
find_vswhere(),
- "-latest",
- "-requires", "Microsoft.Component.MSBuild",
- "-property", 'installationPath'
+ '-latest',
+ '-requires', 'Microsoft.Component.MSBuild',
+ '-property', 'installationPath'
).strip()
return glob(os.path.join(
base_path, 'MSBuild', '*', 'Bin', 'MSBuild.exe'))[0]
@@ -66,10 +66,10 @@ def find_msbuild():
def find_vcvarsall():
productdir = find_visual_studio()
- vcvarsall = os.path.join(productdir, "vcvarsall.bat")
+ vcvarsall = os.path.join(productdir, 'vcvarsall.bat')
if os.path.isfile(vcvarsall):
return vcvarsall
- raise SystemExit("Unable to find vcvarsall.bat in productdir: " +
+ raise SystemExit('Unable to find vcvarsall.bat in productdir: ' +
productdir)
@@ -92,9 +92,9 @@ def query_process(cmd, is64bit):
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
- raise RuntimeError(stderr.decode("mbcs"))
+ raise RuntimeError(stderr.decode('mbcs'))
- stdout = stdout.decode("mbcs")
+ stdout = stdout.decode('mbcs')
for line in stdout.splitlines():
if '=' not in line:
continue
diff --git a/setup/wincross.py b/setup/wincross.py
index a8da16de41..31e688362c 100644
--- a/setup/wincross.py
+++ b/setup/wincross.py
@@ -88,7 +88,7 @@ class Packages:
self.files_to_download.append(File(pf))
# CRT headers
- add_package(f"Microsoft.VC.{self.crt_version}.CRT.Headers.base")
+ add_package(f'Microsoft.VC.{self.crt_version}.CRT.Headers.base')
# CRT libs
prefix = f'Microsoft.VC.{self.crt_version}.CRT.{arch}.'.lower()
variants = {}
@@ -102,7 +102,7 @@ class Packages:
variants[variant] = pid
add_package(variants[crt_variant])
# ATL headers
- add_package(f"Microsoft.VC.{self.crt_version}.ATL.Headers.base")
+ add_package(f'Microsoft.VC.{self.crt_version}.ATL.Headers.base')
# ATL libs
add_package(f'Microsoft.VC.{self.crt_version}.ATL.{arch}.Spectre.base')
add_package(f'Microsoft.VC.{self.crt_version}.ATL.{arch}.base')
@@ -188,10 +188,10 @@ def download(dest_dir, manifest_version=17, manifest_type='release', manifest_pa
url = f'https://aka.ms/vs/{manifest_version}/{manifest_type}/channel'
print('Downloading top-level manifest from', url)
tm = json.loads(urlopen(url).read())
- print("Got toplevel manifest for", (tm["info"]["productDisplayVersion"]))
- for item in tm["channelItems"]:
- if item.get('type') == "Manifest":
- url = item["payloads"][0]["url"]
+ print('Got toplevel manifest for', (tm['info']['productDisplayVersion']))
+ for item in tm['channelItems']:
+ if item.get('type') == 'Manifest':
+ url = item['payloads'][0]['url']
print('Downloading actual manifest...')
manifest = urlopen(url).read()
@@ -244,7 +244,7 @@ def extract_msi(path, dest_dir):
def extract_zipfile(zf, dest_dir):
- tmp = os.path.join(dest_dir, "extract")
+ tmp = os.path.join(dest_dir, 'extract')
os.mkdir(tmp)
for f in zf.infolist():
name = unquote(f.filename)
@@ -259,7 +259,7 @@ def extract_vsix(path, dest_dir):
print('Extracting', os.path.basename(path), '...')
with TemporaryDirectory(dir=dest_dir) as tdir, ZipFile(path, 'r') as zf:
extract_zipfile(zf, tdir)
- contents = os.path.join(tdir, "Contents")
+ contents = os.path.join(tdir, 'Contents')
merge_trees(contents, dest_dir)
names = zf.namelist()
with open(os.path.join(dest_dir, os.path.basename(path) + '.listing'), 'w') as ls:
diff --git a/src/calibre/__init__.py b/src/calibre/__init__.py
index 37bb1bb635..6ee432132c 100644
--- a/src/calibre/__init__.py
+++ b/src/calibre/__init__.py
@@ -560,15 +560,15 @@ def url_slash_cleaner(url):
def human_readable(size, sep=' '):
- """ Convert a size in bytes into a human readable form """
- divisor, suffix = 1, "B"
+ ''' Convert a size in bytes into a human readable form '''
+ divisor, suffix = 1, 'B'
for i, candidate in enumerate(('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
if size < (1 << ((i + 1) * 10)):
divisor, suffix = (1 << (i * 10)), candidate
break
size = str(float(size)/divisor)
- if size.find(".") > -1:
- size = size[:size.find(".")+2]
+ if size.find('.') > -1:
+ size = size[:size.find('.')+2]
if size.endswith('.0'):
size = size[:-2]
return size + sep + suffix
diff --git a/src/calibre/constants.py b/src/calibre/constants.py
index e91f24b47d..dd11ae7d3c 100644
--- a/src/calibre/constants.py
+++ b/src/calibre/constants.py
@@ -14,7 +14,7 @@ __appname__ = 'calibre'
numeric_version = (7, 24, 101)
__version__ = '.'.join(map(str, numeric_version))
git_version = None
-__author__ = "Kovid Goyal "
+__author__ = 'Kovid Goyal '
'''
Various run time constants.
diff --git a/src/calibre/customize/__init__.py b/src/calibre/customize/__init__.py
index a50a76ac94..ff4f7852e7 100644
--- a/src/calibre/customize/__init__.py
+++ b/src/calibre/customize/__init__.py
@@ -574,10 +574,10 @@ class CatalogPlugin(Plugin): # {{{
if requested_fields - all_fields:
from calibre.library import current_library_name
invalid_fields = sorted(list(requested_fields - all_fields))
- print("invalid --fields specified: %s" % ', '.join(invalid_fields))
+ print('invalid --fields specified: %s' % ', '.join(invalid_fields))
print("available fields in '%s': %s" %
(current_library_name(), ', '.join(sorted(list(all_fields)))))
- raise ValueError("unable to generate catalog with specified fields")
+ raise ValueError('unable to generate catalog with specified fields')
fields = [x for x in of if x in all_fields]
else:
@@ -600,7 +600,7 @@ class CatalogPlugin(Plugin): # {{{
from calibre.ptempfile import PersistentTemporaryDirectory
if type(self) not in builtin_plugins and self.name not in config['disabled_plugins']:
- files_to_copy = [f"{self.name.lower()}.{ext}" for ext in ["ui","py"]]
+ files_to_copy = [f'{self.name.lower()}.{ext}' for ext in ['ui','py']]
resources = zipfile.ZipFile(self.plugin_path,'r')
if self.resources_path is None:
@@ -610,7 +610,7 @@ class CatalogPlugin(Plugin): # {{{
try:
resources.extract(file, self.resources_path)
except:
- print(f" customize:__init__.initialize(): {file} not found in {os.path.basename(self.plugin_path)}")
+ print(f' customize:__init__.initialize(): {file} not found in {os.path.basename(self.plugin_path)}')
continue
resources.close()
diff --git a/src/calibre/customize/builtins.py b/src/calibre/customize/builtins.py
index ceac30d58c..7374ed3cec 100644
--- a/src/calibre/customize/builtins.py
+++ b/src/calibre/customize/builtins.py
@@ -1546,7 +1546,7 @@ class StoreAmazonESKindleStore(StoreBase):
class StoreAmazonUKKindleStore(StoreBase):
name = 'Amazon UK Kindle'
author = 'Kovid Goyal'
- description = 'Kindle books from Amazon\'s UK web site. Also, includes French language e-books.'
+ description = "Kindle books from Amazon's UK web site. Also, includes French language e-books."
actual_plugin = 'calibre.gui2.store.stores.amazon_uk_plugin:AmazonKindleStore'
headquarters = 'UK'
@@ -1595,7 +1595,7 @@ class StoreBaenWebScriptionStore(StoreBase):
class StoreBNStore(StoreBase):
name = 'Barnes and Noble'
- description = 'The world\'s largest book seller. As the ultimate destination for book lovers, Barnes & Noble.com offers an incredible array of content.'
+ description = "The world's largest book seller. As the ultimate destination for book lovers, Barnes & Noble.com offers an incredible array of content."
actual_plugin = 'calibre.gui2.store.stores.bn_plugin:BNStore'
headquarters = 'US'
@@ -1825,7 +1825,7 @@ class StoreOzonRUStore(StoreBase):
class StorePragmaticBookshelfStore(StoreBase):
name = 'Pragmatic Bookshelf'
- description = 'The Pragmatic Bookshelf\'s collection of programming and tech books available as e-books.'
+ description = "The Pragmatic Bookshelf's collection of programming and tech books available as e-books."
actual_plugin = 'calibre.gui2.store.stores.pragmatic_bookshelf_plugin:PragmaticBookshelfStore'
drm_free_only = True
diff --git a/src/calibre/customize/zipplugin.py b/src/calibre/customize/zipplugin.py
index efb988d005..34592057a2 100644
--- a/src/calibre/customize/zipplugin.py
+++ b/src/calibre/customize/zipplugin.py
@@ -118,8 +118,8 @@ def load_translations(namespace, zfp):
_translations_cache[zfp] = None
return
with zipfile.ZipFile(zfp) as zf:
- mo_path = zipfile.Path(zf, f"translations/{lang}.mo")
- if not mo_path.exists() and "_" in lang:
+ mo_path = zipfile.Path(zf, f'translations/{lang}.mo')
+ if not mo_path.exists() and '_' in lang:
mo_path = zipfile.Path(zf, f"translations/{lang.split('_')[0]}.mo")
if mo_path.exists():
mo = mo_path.read_bytes()
diff --git a/src/calibre/db/backend.py b/src/calibre/db/backend.py
index c6ceba1e3c..f17e3cbc7c 100644
--- a/src/calibre/db/backend.py
+++ b/src/calibre/db/backend.py
@@ -1516,8 +1516,8 @@ class DB:
@property
def custom_tables(self):
return {x[0] for x in self.conn.get(
- 'SELECT name FROM sqlite_master WHERE type=\'table\' AND '
- '(name GLOB \'custom_column_*\' OR name GLOB \'books_custom_column_*\')')}
+ "SELECT name FROM sqlite_master WHERE type='table' AND "
+ "(name GLOB 'custom_column_*' OR name GLOB 'books_custom_column_*')")}
@classmethod
def exists_at(cls, path):
@@ -2399,7 +2399,7 @@ class DB:
text = "snippet({fts_table}, 0, ?, ?, '…', {snippet_size})".format(
fts_table=fts_table, snippet_size=max(1, min(snippet_size, 64)))
else:
- text = f"highlight({fts_table}, 0, ?, ?)"
+ text = f'highlight({fts_table}, 0, ?, ?)'
data.append(highlight_start)
data.append(highlight_end)
query = 'SELECT {0}.id, {0}.book, {0}.format, {0}.user_type, {0}.user, {0}.annot_data, {1} FROM {0} '
diff --git a/src/calibre/db/cache.py b/src/calibre/db/cache.py
index 4c472b57cd..a0aaa293cf 100644
--- a/src/calibre/db/cache.py
+++ b/src/calibre/db/cache.py
@@ -1093,7 +1093,7 @@ class Cache:
# We can't clear the composite caches because a read lock is set.
# As a consequence the value of a composite column that calls
# virtual_libraries() might be wrong. Oh well. Log and keep running.
- print('Couldn\'t get write lock after vls_for_books_cache was loaded', file=sys.stderr)
+ print("Couldn't get write lock after vls_for_books_cache was loaded", file=sys.stderr)
traceback.print_exc()
if get_cover:
diff --git a/src/calibre/db/cli/cmd_catalog.py b/src/calibre/db/cli/cmd_catalog.py
index 9f75301a64..5c6fa69568 100644
--- a/src/calibre/db/cli/cmd_catalog.py
+++ b/src/calibre/db/cli/cmd_catalog.py
@@ -61,9 +61,9 @@ see the different options, specify the name of the output file and then the
default=None,
dest='ids',
help=_(
- "Comma-separated list of database IDs to catalog.\n"
- "If declared, --search is ignored.\n"
- "Default: all"
+ 'Comma-separated list of database IDs to catalog.\n'
+ 'If declared, --search is ignored.\n'
+ 'Default: all'
)
)
parser.add_option(
@@ -72,10 +72,10 @@ see the different options, specify the name of the output file and then the
default=None,
dest='search_text',
help=_(
- "Filter the results by the search query. "
- "For the format of the search query, please see "
- "the search-related documentation in the User Manual.\n"
- "Default: no filtering"
+ 'Filter the results by the search query. '
+ 'For the format of the search query, please see '
+ 'the search-related documentation in the User Manual.\n'
+ 'Default: no filtering'
)
)
parser.add_option(
diff --git a/src/calibre/db/cli/cmd_check_library.py b/src/calibre/db/cli/cmd_check_library.py
index 59852c29e3..a0812a1040 100644
--- a/src/calibre/db/cli/cmd_check_library.py
+++ b/src/calibre/db/cli/cmd_check_library.py
@@ -39,8 +39,8 @@ Perform some checks on the filesystem representing a library. Reports are {0}
'--report',
default=None,
dest='report',
- help=_("Comma-separated list of reports.\n"
- "Default: all")
+ help=_('Comma-separated list of reports.\n'
+ 'Default: all')
)
parser.add_option(
@@ -48,8 +48,8 @@ Perform some checks on the filesystem representing a library. Reports are {0}
'--ignore_extensions',
default=None,
dest='exts',
- help=_("Comma-separated list of extensions to ignore.\n"
- "Default: all")
+ help=_('Comma-separated list of extensions to ignore.\n'
+ 'Default: all')
)
parser.add_option(
@@ -57,8 +57,8 @@ Perform some checks on the filesystem representing a library. Reports are {0}
'--ignore_names',
default=None,
dest='names',
- help=_("Comma-separated list of names to ignore.\n"
- "Default: all")
+ help=_('Comma-separated list of names to ignore.\n'
+ 'Default: all')
)
parser.add_option(
'--vacuum-fts-db',
diff --git a/src/calibre/db/cli/cmd_list.py b/src/calibre/db/cli/cmd_list.py
index 032b12e670..cbb62578f4 100644
--- a/src/calibre/db/cli/cmd_list.py
+++ b/src/calibre/db/cli/cmd_list.py
@@ -97,7 +97,7 @@ def implementation(
data[field] = {k: cover(db, k) for k in book_ids}
continue
data[field] = db.all_field_for(field, book_ids)
- return {'book_ids': book_ids, "data": data, 'metadata': metadata, 'fields':fields}
+ return {'book_ids': book_ids, 'data': data, 'metadata': metadata, 'fields':fields}
def stringify(data, metadata, for_machine):
diff --git a/src/calibre/db/cli/cmd_list_categories.py b/src/calibre/db/cli/cmd_list_categories.py
index fe9822cd31..a0f3df64ad 100644
--- a/src/calibre/db/cli/cmd_list_categories.py
+++ b/src/calibre/db/cli/cmd_list_categories.py
@@ -54,8 +54,8 @@ information is the equivalent of what is shown in the Tag browser.
'--categories',
default='',
dest='report',
- help=_("Comma-separated list of category lookup names. "
- "Default: all")
+ help=_('Comma-separated list of category lookup names. '
+ 'Default: all')
)
parser.add_option(
'-w',
diff --git a/src/calibre/db/cli/tests.py b/src/calibre/db/cli/tests.py
index a258da3525..7c51c99f17 100644
--- a/src/calibre/db/cli/tests.py
+++ b/src/calibre/db/cli/tests.py
@@ -23,9 +23,9 @@ class Checker:
class PrintCheckLibraryResultsTest(unittest.TestCase):
- """
+ '''
Asserts the format of the output to the CLI to avoid regressions
- """
+ '''
check = ('dummy_check', 'Dummy Check')
@@ -38,11 +38,11 @@ class PrintCheckLibraryResultsTest(unittest.TestCase):
self.assertEqual(stdout.getvalue(), b'')
def test_human_readable_output(self):
- """
+ '''
Basic check of the human-readable output.
Does not test: the full line format, truncation
- """
+ '''
data = [['first', 'second']]
checker = Checker(dict.fromkeys(self.check))
setattr(checker, self.check[0], data)
@@ -62,9 +62,9 @@ class PrintCheckLibraryResultsTest(unittest.TestCase):
self.assertEqual(result[-1], '')
def test_basic_csv_output(self):
- """
+ '''
Test simple csv output
- """
+ '''
data = [['first', 'second']]
checker = Checker(dict.fromkeys(self.check))
setattr(checker, self.check[0], data)
@@ -76,9 +76,9 @@ class PrintCheckLibraryResultsTest(unittest.TestCase):
self.assertEqual(parsed_result, [[self.check[1], data[0][0], data[0][1]]])
def test_escaped_csv_output(self):
- """
+ '''
Test more complex csv output
- """
+ '''
data = [['I, Caesar', 'second']]
checker = Checker(dict.fromkeys(self.check))
setattr(checker, self.check[0], data)
diff --git a/src/calibre/db/fts/connect.py b/src/calibre/db/fts/connect.py
index 32f9e04393..ce9a54dedc 100644
--- a/src/calibre/db/fts/connect.py
+++ b/src/calibre/db/fts/connect.py
@@ -39,7 +39,7 @@ class FTS:
if conn.fts_dbpath is None:
main_db_path = os.path.abspath(conn.db_filename('main'))
dbpath = os.path.join(os.path.dirname(main_db_path), 'full-text-search.db')
- conn.execute("ATTACH DATABASE ? AS fts_db", (dbpath,))
+ conn.execute('ATTACH DATABASE ? AS fts_db', (dbpath,))
SchemaUpgrade(conn)
conn.execute('UPDATE fts_db.dirtied_formats SET in_progress=FALSE WHERE in_progress=TRUE')
num_dirty = conn.get('''SELECT COUNT(*) from fts_db.dirtied_formats''')[0][0]
diff --git a/src/calibre/db/lazy.py b/src/calibre/db/lazy.py
index 2d0d7de036..d96f033e3a 100644
--- a/src/calibre/db/lazy.py
+++ b/src/calibre/db/lazy.py
@@ -384,8 +384,8 @@ class ProxyMetadata(Metadata):
# compatibility, flag __iter__ as unimplemented. This won't break anything
# because the Metadata version raises AttributeError
def __iter__(self):
- raise NotImplementedError("__iter__() cannot be used in this context. "
- "Use the explicit methods such as all_field_keys()")
+ raise NotImplementedError('__iter__() cannot be used in this context. '
+ 'Use the explicit methods such as all_field_keys()')
def has_key(self, key):
return key in STANDARD_METADATA_FIELDS or key in ga(self, '_user_metadata')
diff --git a/src/calibre/db/locking.py b/src/calibre/db/locking.py
index 6b9d1fedf3..9b61f2bb9d 100644
--- a/src/calibre/db/locking.py
+++ b/src/calibre/db/locking.py
@@ -118,7 +118,7 @@ class SHLock: # {{{
with self._lock:
if self.is_exclusive:
if self._exclusive_owner is not me:
- raise LockingError("release() called on unheld lock")
+ raise LockingError('release() called on unheld lock')
self.is_exclusive -= 1
if not self.is_exclusive:
self._exclusive_owner = None
@@ -143,7 +143,7 @@ class SHLock: # {{{
if self._shared_owners[me] == 0:
del self._shared_owners[me]
except KeyError:
- raise LockingError("release() called on unheld lock")
+ raise LockingError('release() called on unheld lock')
self.is_shared -= 1
if not self.is_shared:
# If there are waiting exclusive locks,
@@ -156,7 +156,7 @@ class SHLock: # {{{
else:
assert not self._shared_queue
else:
- raise LockingError("release() called on unheld lock")
+ raise LockingError('release() called on unheld lock')
def _acquire_shared(self, blocking=True):
me = current_thread()
diff --git a/src/calibre/db/notes/connect.py b/src/calibre/db/notes/connect.py
index 77c191fd2e..fe6c05832c 100644
--- a/src/calibre/db/notes/connect.py
+++ b/src/calibre/db/notes/connect.py
@@ -83,7 +83,7 @@ class Notes:
def reopen(self, backend):
conn = backend.get_connection()
conn.notes_dbpath = os.path.join(self.notes_dir, NOTES_DB_NAME)
- conn.execute("ATTACH DATABASE ? AS notes_db", (conn.notes_dbpath,))
+ conn.execute('ATTACH DATABASE ? AS notes_db', (conn.notes_dbpath,))
self.allowed_fields = set()
triggers = []
for table in backend.tables.values():
diff --git a/src/calibre/db/schema_upgrades.py b/src/calibre/db/schema_upgrades.py
index 6e9a9ef318..1dd5a7d1d0 100644
--- a/src/calibre/db/schema_upgrades.py
+++ b/src/calibre/db/schema_upgrades.py
@@ -300,7 +300,7 @@ class SchemaUpgrade:
for field in itervalues(self.field_metadata):
if field['is_category'] and not field['is_custom'] and 'link_column' in field:
table = self.db.get(
- 'SELECT name FROM sqlite_master WHERE type=\'table\' AND name=?',
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
('books_%s_link'%field['table'],), all=False)
if table is not None:
create_tag_browser_view(field['table'], field['link_column'], field['column'])
@@ -376,7 +376,7 @@ class SchemaUpgrade:
for field in itervalues(self.field_metadata):
if field['is_category'] and not field['is_custom'] and 'link_column' in field:
table = self.db.get(
- 'SELECT name FROM sqlite_master WHERE type=\'table\' AND name=?',
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
('books_%s_link'%field['table'],), all=False)
if table is not None:
create_std_tag_browser_view(field['table'], field['link_column'],
diff --git a/src/calibre/db/tests/fts.py b/src/calibre/db/tests/fts.py
index b607be21ba..8690888c83 100644
--- a/src/calibre/db/tests/fts.py
+++ b/src/calibre/db/tests/fts.py
@@ -83,7 +83,7 @@ class FTSTest(BaseTest):
self.ae(q, expected_tokens)
self.ae(
- tokenize("Some wörds"),
+ tokenize('Some wörds'),
[t('some', 0, 4), t('wörds', 5, 11), t('words', 5, 11, 1)]
)
self.ae(
@@ -91,20 +91,20 @@ class FTSTest(BaseTest):
[t("don't", 0, 5), t('bug', 7, 10)]
)
self.ae(
- tokenize("a,b. c"),
- [t("a", 0, 1), t('b', 2, 3), t('c', 5, 6)]
+ tokenize('a,b. c'),
+ [t('a', 0, 1), t('b', 2, 3), t('c', 5, 6)]
)
self.ae(
- tokenize("a*b+c"),
- [t("a", 0, 1), t('b', 2, 3), t('c', 4, 5)]
+ tokenize('a*b+c'),
+ [t('a', 0, 1), t('b', 2, 3), t('c', 4, 5)]
)
self.ae(
- tokenize("a(b[{^c"),
- [t("a", 0, 1), t('b', 2, 3), t('c', 6, 7)]
+ tokenize('a(b[{^c'),
+ [t('a', 0, 1), t('b', 2, 3), t('c', 6, 7)]
)
self.ae(
- tokenize("a😀smile"),
- [t("a", 0, 1), t('😀', 1, 5), t('smile', 5, 10)]
+ tokenize('a😀smile'),
+ [t('a', 0, 1), t('😀', 1, 5), t('smile', 5, 10)]
)
tt("你don't叫mess", '你', "don't", '叫', 'mess')
@@ -125,14 +125,14 @@ class FTSTest(BaseTest):
conn = TestConn()
conn.insert_text('two words, and a period. With another.')
conn.insert_text('and another re-init')
- self.ae(conn.search("another"), [('and >another< re-init',), ('…With >another<.',)])
- self.ae(conn.search("period"), [('…a >period<. With another.',)])
+ self.ae(conn.search('another'), [('and >another< re-init',), ('…With >another<.',)])
+ self.ae(conn.search('period'), [('…a >period<. With another.',)])
self.ae(conn.term_row_counts(), {'a': 1, 're': 1, 'init': 1, 'and': 2, 'another': 2, 'period': 1, 'two': 1, 'with': 1, 'words': 1})
conn = TestConn()
conn.insert_text('coộl')
self.ae(conn.term_row_counts(), {'cool': 1, 'coộl': 1})
- self.ae(conn.search("cool"), [('>coộl<',)])
- self.ae(conn.search("coộl"), [('>coộl<',)])
+ self.ae(conn.search('cool'), [('>coộl<',)])
+ self.ae(conn.search('coộl'), [('>coộl<',)])
conn = TestConn(remove_diacritics=False)
conn.insert_text('coộl')
self.ae(conn.term_row_counts(), {'coộl': 1})
@@ -140,13 +140,13 @@ class FTSTest(BaseTest):
conn = TestConn()
conn.insert_text("你don't叫mess")
self.ae(conn.term_row_counts(), {"don't": 1, 'mess': 1, '你': 1, '叫': 1})
- self.ae(conn.search("mess"), [("你don't叫>mess<",)])
+ self.ae(conn.search('mess'), [("你don't叫>mess<",)])
self.ae(conn.search('''"don't"'''), [("你>don't<叫mess",)])
- self.ae(conn.search("你"), [(">你你叫叫connection<',),])
- self.ae(conn.search("connect"), [('a simplistic >connection<',),])
- self.ae(conn.search("simplistic connect"), [('a >simplistic< >connection<',),])
- self.ae(conn.search("simplist"), [('a >simplistic< connection',),])
+ self.ae(conn.search('connection'), [('a simplistic >connection<',),])
+ self.ae(conn.search('connect'), [('a simplistic >connection<',),])
+ self.ae(conn.search('simplistic connect'), [('a >simplistic< >connection<',),])
+ self.ae(conn.search('simplist'), [('a >simplistic< connection',),])
# }}}
diff --git a/src/calibre/db/tests/legacy.py b/src/calibre/db/tests/legacy.py
index 82ba8b76ac..9cb4ba13c0 100644
--- a/src/calibre/db/tests/legacy.py
+++ b/src/calibre/db/tests/legacy.py
@@ -356,9 +356,9 @@ class LegacyTest(BaseTest):
def test_legacy_adding_books(self): # {{{
'Test various adding/deleting books methods'
import sqlite3
- con = sqlite3.connect(":memory:")
+ con = sqlite3.connect(':memory:')
try:
- con.execute("create virtual table recipe using fts5(name, ingredients)")
+ con.execute('create virtual table recipe using fts5(name, ingredients)')
except Exception:
self.skipTest('python sqlite3 module does not have FTS5 support')
con.close()
diff --git a/src/calibre/db/tests/locking.py b/src/calibre/db/tests/locking.py
index 56eaf7c52c..3c17c04358 100644
--- a/src/calibre/db/tests/locking.py
+++ b/src/calibre/db/tests/locking.py
@@ -22,7 +22,7 @@ def wait_for(period):
class TestLock(BaseTest):
- """Tests for db locking """
+ '''Tests for db locking '''
def test_owns_locks(self):
lock = SHLock()
diff --git a/src/calibre/db/tests/reading.py b/src/calibre/db/tests/reading.py
index 411f05fe7c..cbdb53eb2b 100644
--- a/src/calibre/db/tests/reading.py
+++ b/src/calibre/db/tests/reading.py
@@ -371,8 +371,8 @@ class ReadingTest(BaseTest):
cache.set_field('timestamp', {1:p('2002-02-06'), 2:p('2000-10-06'), 3:p('2001-06-06')})
# Test numeric compare search
self.assertEqual(cache.search("template:\"program: "
- "floor(days_between(field(\'pubdate\'), "
- "field(\'timestamp\')))#@#:n:>0\""), {2,3})
+ "floor(days_between(field('pubdate'), "
+ "field('timestamp')))#@#:n:>0\""), {2,3})
# Test date search
self.assertEqual(cache.search('template:{pubdate}#@#:d:<2001-09-01"'), {1,3})
# Test boolean search
@@ -380,7 +380,7 @@ class ReadingTest(BaseTest):
self.assertEqual(cache.search('template:{series}#@#:b:false'), {3})
# test primary search
- cache.set_field('title', {1: "Gravity’s Raiñbow"})
+ cache.set_field('title', {1: 'Gravity’s Raiñbow'})
self.assertEqual(cache.search('title:"Gravity\'s Rainbow"'), {1})
# Note that the old db searched uuid for un-prefixed searches, the new
# db does not, for performance
@@ -945,7 +945,7 @@ def evaluate(book, ctx):
from calibre.utils.formatter_functions import load_user_template_functions, unload_user_template_functions
load_user_template_functions('aaaaa',
[['python_stored_template',
- "",
+ '',
0,
'''python:
def evaluate(book, ctx):
diff --git a/src/calibre/db/tests/writing.py b/src/calibre/db/tests/writing.py
index 27ba21e845..9cc25ffa3f 100644
--- a/src/calibre/db/tests/writing.py
+++ b/src/calibre/db/tests/writing.py
@@ -790,7 +790,7 @@ class WritingTest(BaseTest):
self.assertNotIn(uid, t.id_map)
self.assertNotIn(uid, t.col_book_map)
for bid in (1, 2, 3):
- ae(c.field_for('publisher', bid), "mūs")
+ ae(c.field_for('publisher', bid), 'mūs')
c.close()
cache = self.init_cache()
@@ -1026,17 +1026,17 @@ class WritingTest(BaseTest):
self.assertEqual('url2', links['publisher']['random'], 'link for publisher random is wrong')
# Check that renaming a tag keeps the link and clears the link map cache for the book
- self.assertTrue(1 in cache.link_maps_cache, "book not in link_map_cache")
+ self.assertTrue(1 in cache.link_maps_cache, 'book not in link_map_cache')
tag_id = cache.get_item_id('tags', 'foo')
cache.rename_items('tags', {tag_id: 'foobar'})
- self.assertTrue(1 not in cache.link_maps_cache, "book still in link_map_cache")
+ self.assertTrue(1 not in cache.link_maps_cache, 'book still in link_map_cache')
links = cache.get_link_map('tags')
- self.assertTrue('foobar' in links, "rename foo lost the link")
- self.assertEqual(links['foobar'], 'url', "The link changed contents")
+ self.assertTrue('foobar' in links, 'rename foo lost the link')
+ self.assertEqual(links['foobar'], 'url', 'The link changed contents')
links = cache.get_all_link_maps_for_book(1)
- self.assertTrue(1 in cache.link_maps_cache, "book not put back into link_map_cache")
+ self.assertTrue(1 in cache.link_maps_cache, 'book not put back into link_map_cache')
self.assertDictEqual({'publisher': {'random': 'url2'}, 'tags': {'foobar': 'url'}},
- links, "book links incorrect after tag rename")
+ links, 'book links incorrect after tag rename')
# Check ProxyMetadata
mi = cache.get_proxy_metadata(1)
diff --git a/src/calibre/debug.py b/src/calibre/debug.py
index 36310bfe28..e0959616f8 100644
--- a/src/calibre/debug.py
+++ b/src/calibre/debug.py
@@ -63,7 +63,7 @@ as a shebang in scripts, like this:
help=_('Run the GUI with a debug console, logging to the'
' specified path. For internal use only, use the -g'
' option to run the GUI in debug mode'))
- parser.add_option('--run-without-debug', default=False, action='store_true', help=_('Don\'t run with the DEBUG flag set'))
+ parser.add_option('--run-without-debug', default=False, action='store_true', help=_("Don't run with the DEBUG flag set"))
parser.add_option('-w', '--viewer', default=False, action='store_true',
help=_('Run the E-book viewer in debug mode'))
parser.add_option('--paths', default=False, action='store_true',
diff --git a/src/calibre/devices/__init__.py b/src/calibre/devices/__init__.py
index be170ca627..317a30e4b4 100644
--- a/src/calibre/devices/__init__.py
+++ b/src/calibre/devices/__init__.py
@@ -25,7 +25,7 @@ def strptime(src):
def strftime(epoch, zone=time.gmtime):
- src = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone(epoch)).split()
+ src = time.strftime('%w, %d %m %Y %H:%M:%S GMT', zone(epoch)).split()
src[0] = INVERSE_DAY_MAP[int(src[0][:-1])]+','
src[2] = INVERSE_MONTH_MAP[int(src[2])]
return ' '.join(src)
diff --git a/src/calibre/devices/android/driver.py b/src/calibre/devices/android/driver.py
index 3c75c760a2..8b73d96d12 100644
--- a/src/calibre/devices/android/driver.py
+++ b/src/calibre/devices/android/driver.py
@@ -212,10 +212,10 @@ class ANDROID(USBMS):
EBOOK_DIR_MAIN = ['eBooks/import', 'wordplayer/calibretransfer', 'Books',
'sdcard/ebooks']
EXTRA_CUSTOMIZATION_MESSAGE = [_('Comma separated list of folders to '
- 'send e-books to on the device\'s main memory. The first one that exists will '
+ "send e-books to on the device's main memory. The first one that exists will "
'be used'),
_('Comma separated list of folders to '
- 'send e-books to on the device\'s storage cards. The first one that exists will '
+ "send e-books to on the device's storage cards. The first one that exists will "
'be used')
]
diff --git a/src/calibre/devices/cli.py b/src/calibre/devices/cli.py
index ae26184675..cab3cbf152 100755
--- a/src/calibre/devices/cli.py
+++ b/src/calibre/devices/cli.py
@@ -3,11 +3,11 @@
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal '
-"""
+'''
Provides a command-line interface to ebook devices.
For usage information run the script.
-"""
+'''
import os
import sys
@@ -37,14 +37,14 @@ class FileFormatter:
@property
def mode_string(self):
- """ The mode string for this file. There are only two modes read-only and read-write """
- mode, x = "-", "-"
+ ''' The mode string for this file. There are only two modes read-only and read-write '''
+ mode, x = '-', '-'
if self.is_dir:
- mode, x = "d", "x"
+ mode, x = 'd', 'x'
if self.is_readonly:
- mode += "r-"+x+"r-"+x+"r-"+x
+ mode += 'r-'+x+'r-'+x+'r-'+x
else:
- mode += "rw"+x+"rw"+x+"rw"+x
+ mode += 'rw'+x+'rw'+x+'rw'+x
return mode
@property
@@ -57,41 +57,41 @@ class FileFormatter:
@property
def name_in_color(self):
- """ The name in ANSI text. Directories are blue, ebooks are green """
+ ''' The name in ANSI text. Directories are blue, ebooks are green '''
cname = self.name
- blue, green, normal = "", "", ""
+ blue, green, normal = '', '', ''
if self.term:
blue, green, normal = self.term.BLUE, self.term.GREEN, self.term.NORMAL
if self.is_dir:
cname = blue + self.name + normal
else:
- ext = self.name[self.name.rfind("."):]
- if ext in (".pdf", ".rtf", ".lrf", ".lrx", ".txt"):
+ ext = self.name[self.name.rfind('.'):]
+ if ext in ('.pdf', '.rtf', '.lrf', '.lrx', '.txt'):
cname = green + self.name + normal
return cname
@property
def human_readable_size(self):
- """ File size in human readable form """
+ ''' File size in human readable form '''
return human_readable(self.size)
@property
def modification_time(self):
- """ Last modified time in the Linux ls -l format """
- return time.strftime("%Y-%m-%d %H:%M", time.localtime(self.wtime))
+ ''' Last modified time in the Linux ls -l format '''
+ return time.strftime('%Y-%m-%d %H:%M', time.localtime(self.wtime))
@property
def creation_time(self):
- """ Last modified time in the Linux ls -l format """
- return time.strftime("%Y-%m-%d %H:%M", time.localtime(self.ctime))
+ ''' Last modified time in the Linux ls -l format '''
+ return time.strftime('%Y-%m-%d %H:%M', time.localtime(self.ctime))
def info(dev):
info = dev.get_device_information()
- print("Device name: ", info[0])
- print("Device version: ", info[1])
- print("Software version:", info[2])
- print("Mime type: ", info[3])
+ print('Device name: ', info[0])
+ print('Device version: ', info[1])
+ print('Software version:', info[2])
+ print('Mime type: ', info[3])
def ls(dev, path, recurse=False, human_readable_size=False, ll=False, cols=0):
@@ -115,12 +115,12 @@ def ls(dev, path, recurse=False, human_readable_size=False, ll=False, cols=0):
return rowwidths
output = PolyglotStringIO()
- if path.endswith("/") and len(path) > 1:
+ if path.endswith('/') and len(path) > 1:
path = path[:-1]
dirs = dev.list(path, recurse)
for dir in dirs:
if recurse:
- prints(dir[0] + ":", file=output)
+ prints(dir[0] + ':', file=output)
lsoutput, lscoloutput = [], []
files = dir[1]
maxlen = 0
@@ -141,7 +141,7 @@ def ls(dev, path, recurse=False, human_readable_size=False, ll=False, cols=0):
size = str(file.size)
if human_readable_size:
size = file.human_readable_size
- prints(file.mode_string, ("%"+str(maxlen)+"s")%size, file.modification_time, name, file=output)
+ prints(file.mode_string, ('%'+str(maxlen)+'s')%size, file.modification_time, name, file=output)
if not ll and len(lsoutput) > 0:
trytable = []
for colwidth in range(MINIMUM_COL_WIDTH, cols):
@@ -163,10 +163,10 @@ def ls(dev, path, recurse=False, human_readable_size=False, ll=False, cols=0):
for r in range(len(trytable)):
for c in range(len(trytable[r])):
padding = rowwidths[c] - len(trytable[r][c])
- prints(trytablecol[r][c], "".ljust(padding), end=' ', file=output)
+ prints(trytablecol[r][c], ''.ljust(padding), end=' ', file=output)
prints(file=output)
prints(file=output)
- listing = output.getvalue().rstrip() + "\n"
+ listing = output.getvalue().rstrip() + '\n'
output.close()
return listing
@@ -183,13 +183,13 @@ def main():
from calibre.utils.terminal import geometry
cols = geometry()[0]
- parser = OptionParser(usage="usage: %prog [options] command args\n\ncommand "+
- "is one of: info, books, df, ls, cp, mkdir, touch, cat, rm, eject, test_file\n\n"+
- "For help on a particular command: %prog command", version=__appname__+" version: " + __version__)
- parser.add_option("--log-packets", help="print out packet stream to stdout. "+
- "The numbers in the left column are byte offsets that allow the packet size to be read off easily.",
- dest="log_packets", action="store_true", default=False)
- parser.remove_option("-h")
+ parser = OptionParser(usage='usage: %prog [options] command args\n\ncommand '+
+ 'is one of: info, books, df, ls, cp, mkdir, touch, cat, rm, eject, test_file\n\n'+
+ 'For help on a particular command: %prog command', version=__appname__+' version: ' + __version__)
+ parser.add_option('--log-packets', help='print out packet stream to stdout. '+
+ 'The numbers in the left column are byte offsets that allow the packet size to be read off easily.',
+ dest='log_packets', action='store_true', default=False)
+ parser.remove_option('-h')
parser.disable_interspersed_args() # Allow unrecognized options
options, args = parser.parse_args()
@@ -238,55 +238,55 @@ def main():
break
try:
- if command == "df":
+ if command == 'df':
total = dev.total_space(end_session=False)
free = dev.free_space()
- where = ("Memory", "Card A", "Card B")
- print("Filesystem\tSize \tUsed \tAvail \tUse%")
+ where = ('Memory', 'Card A', 'Card B')
+ print('Filesystem\tSize \tUsed \tAvail \tUse%')
for i in range(3):
- print("%-10s\t%s\t%s\t%s\t%s"%(where[i], human_readable(total[i]), human_readable(total[i]-free[i]), human_readable(free[i]),
- str(0 if total[i]==0 else int(100*(total[i]-free[i])/(total[i]*1.)))+"%"))
+ print('%-10s\t%s\t%s\t%s\t%s'%(where[i], human_readable(total[i]), human_readable(total[i]-free[i]), human_readable(free[i]),
+ str(0 if total[i]==0 else int(100*(total[i]-free[i])/(total[i]*1.)))+'%'))
elif command == 'eject':
dev.eject()
- elif command == "books":
- print("Books in main memory:")
+ elif command == 'books':
+ print('Books in main memory:')
for book in dev.books():
print(book)
- print("\nBooks on storage carda:")
+ print('\nBooks on storage carda:')
for book in dev.books(oncard='carda'):
print(book)
- print("\nBooks on storage cardb:")
+ print('\nBooks on storage cardb:')
for book in dev.books(oncard='cardb'):
print(book)
- elif command == "mkdir":
- parser = OptionParser(usage="usage: %prog mkdir [options] path\nCreate a folder on the device\n\npath must begin with / or card:/")
+ elif command == 'mkdir':
+ parser = OptionParser(usage='usage: %prog mkdir [options] path\nCreate a folder on the device\n\npath must begin with / or card:/')
if len(args) != 1:
parser.print_help()
sys.exit(1)
dev.mkdir(args[0])
- elif command == "ls":
- parser = OptionParser(usage="usage: %prog ls [options] path\nList files on the device\n\npath must begin with / or card:/")
+ elif command == 'ls':
+ parser = OptionParser(usage='usage: %prog ls [options] path\nList files on the device\n\npath must begin with / or card:/')
parser.add_option(
- "-l", help="In addition to the name of each file, print the file type, permissions, and timestamp (the modification time, in the local timezone). Times are local.", # noqa: E501
- dest="ll", action="store_true", default=False)
- parser.add_option("-R", help="Recursively list subfolders encountered. /dev and /proc are omitted",
- dest="recurse", action="store_true", default=False)
- parser.remove_option("-h")
- parser.add_option("-h", "--human-readable", help="show sizes in human readable format", dest="hrs", action="store_true", default=False)
+ '-l', help='In addition to the name of each file, print the file type, permissions, and timestamp (the modification time, in the local timezone). Times are local.', # noqa: E501
+ dest='ll', action='store_true', default=False)
+ parser.add_option('-R', help='Recursively list subfolders encountered. /dev and /proc are omitted',
+ dest='recurse', action='store_true', default=False)
+ parser.remove_option('-h')
+ parser.add_option('-h', '--human-readable', help='show sizes in human readable format', dest='hrs', action='store_true', default=False)
options, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
return 1
print(ls(dev, args[0], recurse=options.recurse, ll=options.ll, human_readable_size=options.hrs, cols=cols), end=' ')
- elif command == "info":
+ elif command == 'info':
info(dev)
- elif command == "cp":
- usage="usage: %prog cp [options] source destination\nCopy files to/from the device\n\n"+\
- "One of source or destination must be a path on the device. \n\nDevice paths have the form\n"+\
- "dev:mountpoint/my/path\n"+\
- "where mountpoint is one of / or carda: or cardb:/\n\n"+\
- "source must point to a file for which you have read permissions\n"+\
- "destination must point to a file or folder for which you have write permissions"
+ elif command == 'cp':
+ usage='usage: %prog cp [options] source destination\nCopy files to/from the device\n\n'+\
+ 'One of source or destination must be a path on the device. \n\nDevice paths have the form\n'+\
+ 'dev:mountpoint/my/path\n'+\
+ 'where mountpoint is one of / or carda: or cardb:/\n\n'+\
+ 'source must point to a file for which you have read permissions\n'+\
+ 'destination must point to a file or folder for which you have write permissions'
parser = OptionParser(usage=usage)
parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Overwrite the destination file if it exists already.')
@@ -294,15 +294,15 @@ def main():
if len(args) != 2:
parser.print_help()
return 1
- if args[0].startswith("dev:"):
+ if args[0].startswith('dev:'):
outfile = args[1]
path = args[0][4:]
- if path.endswith("/"):
+ if path.endswith('/'):
path = path[:-1]
if os.path.isdir(outfile):
- outfile = os.path.join(outfile, path[path.rfind("/")+1:])
+ outfile = os.path.join(outfile, path[path.rfind('/')+1:])
try:
- outfile = open(outfile, "wb")
+ outfile = open(outfile, 'wb')
except OSError as e:
print(e, file=sys.stderr)
parser.print_help()
@@ -310,9 +310,9 @@ def main():
dev.get_file(path, outfile)
fsync(outfile)
outfile.close()
- elif args[1].startswith("dev:"):
+ elif args[1].startswith('dev:'):
try:
- infile = open(args[0], "rb")
+ infile = open(args[0], 'rb')
except OSError as e:
print(e, file=sys.stderr)
parser.print_help()
@@ -322,31 +322,31 @@ def main():
else:
parser.print_help()
return 1
- elif command == "cat":
+ elif command == 'cat':
outfile = sys.stdout
parser = OptionParser(
- usage="usage: %prog cat path\nShow file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/")
+ usage='usage: %prog cat path\nShow file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
return 1
- if args[0].endswith("/"):
+ if args[0].endswith('/'):
path = args[0][:-1]
else:
path = args[0]
outfile = sys.stdout
dev.get_file(path, outfile)
- elif command == "rm":
- parser = OptionParser(usage="usage: %prog rm path\nDelete files from the device\n\npath should point to a file or empty folder on the device "+
- "and must begin with / or card:/\n\n"+
- "rm will DELETE the file. Be very CAREFUL")
+ elif command == 'rm':
+ parser = OptionParser(usage='usage: %prog rm path\nDelete files from the device\n\npath should point to a file or empty folder on the device '+
+ 'and must begin with / or card:/\n\n'+
+ 'rm will DELETE the file. Be very CAREFUL')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
return 1
dev.rm(args[0])
- elif command == "touch":
- parser = OptionParser(usage="usage: %prog touch path\nCreate an empty file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/\n\n"+ # noqa: E501
+ elif command == 'touch':
+ parser = OptionParser(usage='usage: %prog touch path\nCreate an empty file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/\n\n'+ # noqa: E501
"Unfortunately, I can't figure out how to update file times on the device, so if path already exists, touch does nothing")
options, args = parser.parse_args(args)
if len(args) != 1:
@@ -354,7 +354,7 @@ def main():
return 1
dev.touch(args[0])
elif command == 'test_file':
- parser = OptionParser(usage=("usage: %prog test_file path\n"
+ parser = OptionParser(usage=('usage: %prog test_file path\n'
'Open device, copy file specified by path to device and '
'then eject device.'))
options, args = parser.parse_args(args)
@@ -373,7 +373,7 @@ def main():
dev.close()
return 1
except DeviceLocked:
- print("The device is locked. Use the --unlock option", file=sys.stderr)
+ print('The device is locked. Use the --unlock option', file=sys.stderr)
except (ArgumentError, DeviceError) as e:
print(e, file=sys.stderr)
return 1
diff --git a/src/calibre/devices/cybook/t2b.py b/src/calibre/devices/cybook/t2b.py
index 62ac8cdb2f..22ccbcef3c 100644
--- a/src/calibre/devices/cybook/t2b.py
+++ b/src/calibre/devices/cybook/t2b.py
@@ -23,7 +23,7 @@ def reduce_color(c):
def i2b(n):
- return "".join([str((n >> y) & 1) for y in range(1, -1, -1)])
+ return ''.join([str((n >> y) & 1) for y in range(1, -1, -1)])
def write_t2b(t2bfile, coverdata=None):
@@ -34,7 +34,7 @@ def write_t2b(t2bfile, coverdata=None):
from PIL import Image
if coverdata is not None:
coverdata = io.BytesIO(coverdata)
- cover = Image.open(coverdata).convert("L")
+ cover = Image.open(coverdata).convert('L')
cover.thumbnail((96, 144), Image.Resampling.LANCZOS)
t2bcover = Image.new('L', (96, 144), 'white')
diff --git a/src/calibre/devices/cybook/t4b.py b/src/calibre/devices/cybook/t4b.py
index ce977be27b..da5c9e51f9 100644
--- a/src/calibre/devices/cybook/t4b.py
+++ b/src/calibre/devices/cybook/t4b.py
@@ -21,7 +21,7 @@ def write_t4b(t4bfile, coverdata=None):
from PIL import Image
if coverdata is not None:
coverdata = BytesIO(coverdata)
- cover = Image.open(coverdata).convert("L")
+ cover = Image.open(coverdata).convert('L')
cover.thumbnail((96, 144), Image.Resampling.LANCZOS)
t4bcover = Image.new('L', (96, 144), 'white')
diff --git a/src/calibre/devices/eb600/driver.py b/src/calibre/devices/eb600/driver.py
index 2c0d65772a..ea09009b64 100644
--- a/src/calibre/devices/eb600/driver.py
+++ b/src/calibre/devices/eb600/driver.py
@@ -71,7 +71,7 @@ class TOLINO(EB600):
EXTRA_CUSTOMIZATION_MESSAGE = [
_('Swap main and card A') +
':::' +
- _('Check this box if the device\'s main memory is being seen as card a and the card '
+ _("Check this box if the device's main memory is being seen as card a and the card "
'is being seen as main memory. Some tolino devices may need this option.'),
]
diff --git a/src/calibre/devices/errors.py b/src/calibre/devices/errors.py
index 7449bbcb71..bd6c2f301e 100644
--- a/src/calibre/devices/errors.py
+++ b/src/calibre/devices/errors.py
@@ -1,36 +1,36 @@
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal '
-"""
+'''
Defines the errors that the device drivers generate.
G{classtree ProtocolError}
-"""
+'''
class ProtocolError(Exception):
- """ The base class for all exceptions in this package """
+ ''' The base class for all exceptions in this package '''
def __init__(self, msg):
Exception.__init__(self, msg)
class TimeoutError(ProtocolError):
- """ There was a timeout during communication """
+ ''' There was a timeout during communication '''
def __init__(self, func_name):
ProtocolError.__init__(
self,
- "There was a timeout while communicating with the device in function: " +
+ 'There was a timeout while communicating with the device in function: ' +
func_name
)
class DeviceError(ProtocolError):
- """ Raised when device is not found """
+ ''' Raised when device is not found '''
def __init__(self, msg=None):
if msg is None:
- msg = "Unable to find SONY Reader. Is it connected?"
+ msg = 'Unable to find SONY Reader. Is it connected?'
ProtocolError.__init__(self, msg)
@@ -71,14 +71,14 @@ class OpenActionNeeded(DeviceError):
class InitialConnectionError(OpenFeedback):
- """ Errors detected during connection after detection but before open, for
- e.g. in the is_connected() method. """
+ ''' Errors detected during connection after detection but before open, for
+ e.g. in the is_connected() method. '''
class OpenFailed(ProtocolError):
- """ Raised when device cannot be opened this time. No retry is to be done.
+ ''' Raised when device cannot be opened this time. No retry is to be done.
The device should continue to be polled for future opens. If the
- message is empty, no exception trace is produced. """
+ message is empty, no exception trace is produced. '''
def __init__(self, msg):
ProtocolError.__init__(self, msg)
@@ -86,36 +86,36 @@ class OpenFailed(ProtocolError):
class DeviceBusy(ProtocolError):
- """ Raised when device is busy """
+ ''' Raised when device is busy '''
- def __init__(self, uerr=""):
+ def __init__(self, uerr=''):
ProtocolError.__init__(
- self, "Device is in use by another application:"
- "\nUnderlying error:" + str(uerr)
+ self, 'Device is in use by another application:'
+ '\nUnderlying error:' + str(uerr)
)
class DeviceLocked(ProtocolError):
- """ Raised when device has been locked """
+ ''' Raised when device has been locked '''
def __init__(self):
- ProtocolError.__init__(self, "Device is locked")
+ ProtocolError.__init__(self, 'Device is locked')
class PacketError(ProtocolError):
- """ Errors with creating/interpreting packets """
+ ''' Errors with creating/interpreting packets '''
class FreeSpaceError(ProtocolError):
- """ Errors caused when trying to put files onto an overcrowded device """
+ ''' Errors caused when trying to put files onto an overcrowded device '''
class ArgumentError(ProtocolError):
- """ Errors caused by invalid arguments to a public interface function """
+ ''' Errors caused by invalid arguments to a public interface function '''
class PathError(ArgumentError):
- """ When a user supplies an incorrect/invalid path """
+ ''' When a user supplies an incorrect/invalid path '''
def __init__(self, msg, path=None):
ArgumentError.__init__(self, msg)
@@ -123,7 +123,7 @@ class PathError(ArgumentError):
class ControlError(ProtocolError):
- """ Errors in Command/Response pairs while communicating with the device """
+ ''' Errors in Command/Response pairs while communicating with the device '''
def __init__(self, query=None, response=None, desc=None):
self.query = query
@@ -133,13 +133,13 @@ class ControlError(ProtocolError):
def __str__(self):
if self.query and self.response:
- return "Got unexpected response:\n" + \
- "query:\n"+str(self.query.query)+"\n"+\
- "expected:\n"+str(self.query.response)+"\n" +\
- "actual:\n"+str(self.response)
+ return 'Got unexpected response:\n' + \
+ 'query:\n'+str(self.query.query)+'\n'+\
+ 'expected:\n'+str(self.query.response)+'\n' +\
+ 'actual:\n'+str(self.response)
if self.desc:
return self.desc
- return "Unknown control error occurred"
+ return 'Unknown control error occurred'
class WrongDestinationError(PathError):
diff --git a/src/calibre/devices/interface.py b/src/calibre/devices/interface.py
index 2c2df90771..be3dc32676 100644
--- a/src/calibre/devices/interface.py
+++ b/src/calibre/devices/interface.py
@@ -18,14 +18,14 @@ class OpenPopupMessage:
class DevicePlugin(Plugin):
- """
+ '''
Defines the interface that should be implemented by backends that
communicate with an e-book reader.
- """
+ '''
type = _('Device interface')
#: Ordered list of supported formats
- FORMATS = ["lrf", "rtf", "pdf", "txt"]
+ FORMATS = ['lrf', 'rtf', 'pdf', 'txt']
# If True, the config dialog will not show the formats box
HIDE_FORMATS_CONFIG_BOX = False
@@ -226,7 +226,7 @@ class DevicePlugin(Plugin):
def reset(self, key='-1', log_packets=False, report_progress=None,
detected_device=None):
- """
+ '''
:param key: The key to unlock the device
:param log_packets: If true the packet stream to/from the device is logged
:param report_progress: Function that is called with a % progress
@@ -235,7 +235,7 @@ class DevicePlugin(Plugin):
task does not have any progress information
:param detected_device: Device information from the device scanner
- """
+ '''
raise NotImplementedError()
def can_handle_windows(self, usbdevice, debug=False):
@@ -324,14 +324,14 @@ class DevicePlugin(Plugin):
raise NotImplementedError()
def get_device_information(self, end_session=True):
- """
+ '''
Ask device for device information. See L{DeviceInfoQuery}.
:return: (device name, device version, software version on device, MIME type)
The tuple can optionally have a fifth element, which is a
drive information dictionary. See usbms.driver for an example.
- """
+ '''
raise NotImplementedError()
def get_driveinfo(self):
diff --git a/src/calibre/devices/jetbook/driver.py b/src/calibre/devices/jetbook/driver.py
index 98c20167ea..914fc98f22 100644
--- a/src/calibre/devices/jetbook/driver.py
+++ b/src/calibre/devices/jetbook/driver.py
@@ -38,8 +38,8 @@ class JETBOOK(USBMS):
MAIN_MEMORY_VOLUME_LABEL = 'Jetbook Main Memory'
STORAGE_CARD_VOLUME_LABEL = 'Jetbook Storage Card'
- EBOOK_DIR_MAIN = "Books"
- EBOOK_DIR_CARD_A = "Books"
+ EBOOK_DIR_MAIN = 'Books'
+ EBOOK_DIR_CARD_A = 'Books'
SUPPORTS_SUB_DIRS = True
JETBOOK_FILE_NAME_PATTERN = re.compile(
diff --git a/src/calibre/devices/kindle/apnx.py b/src/calibre/devices/kindle/apnx.py
index 91b9edbb5d..9321b1fd53 100644
--- a/src/calibre/devices/kindle/apnx.py
+++ b/src/calibre/devices/kindle/apnx.py
@@ -24,9 +24,9 @@ from polyglot.builtins import as_bytes, as_unicode
class APNXBuilder:
- """
+ '''
Create an APNX file using a pseudo page mapping.
- """
+ '''
generators: dict[str, IPageGenerator] = {
FastPageGenerator.instance.name(): FastPageGenerator.instance,
@@ -36,11 +36,11 @@ class APNXBuilder:
}
def write_apnx(self, mobi_file_path: str, apnx_path: str, method: str | None = None, page_count: int = 0):
- """
+ '''
If you want a fixed number of pages (such as from a custom column) then
pass in a value to page_count, otherwise a count will be estimated
using either the fast or accurate algorithm.
- """
+ '''
apnx_meta = self.get_apnx_meta(mobi_file_path)
if page_count:
diff --git a/src/calibre/devices/kindle/apnx_page_generator/generators/accurate_page_generator.py b/src/calibre/devices/kindle/apnx_page_generator/generators/accurate_page_generator.py
index fa297d32bf..e657a09705 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/generators/accurate_page_generator.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/generators/accurate_page_generator.py
@@ -13,13 +13,13 @@ class AccuratePageGenerator(IPageGenerator):
instance = None
def name(self) -> str:
- return "accurate"
+ return 'accurate'
def _generate_fallback(self, mobi_file_path: str, real_count: int | None) -> Pages:
return FastPageGenerator.instance.generate(mobi_file_path, real_count)
def _generate(self, mobi_file_path: str, real_count: int | None) -> Pages:
- """
+ '''
A more accurate but much more resource intensive and slower
method to calculate the page length.
@@ -35,7 +35,7 @@ class AccuratePageGenerator(IPageGenerator):
         This can be make more accurate by accounting for
         <div class="mbp_pagebreak" /> as a new page marker.
         And <br> elements as an empty line.
- """
+ '''
pages = []
html = mobi_html(mobi_file_path)
diff --git a/src/calibre/devices/kindle/apnx_page_generator/generators/exact_page_generator.py b/src/calibre/devices/kindle/apnx_page_generator/generators/exact_page_generator.py
index 37341b2e03..d492312426 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/generators/exact_page_generator.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/generators/exact_page_generator.py
@@ -13,17 +13,17 @@ class ExactPageGenerator(IPageGenerator):
instance = None
def name(self) -> str:
- return "exact"
+ return 'exact'
def _generate_fallback(self, mobi_file_path: str, real_count: int | None) -> Pages:
return FastPageGenerator.instance.generate(mobi_file_path, real_count)
def _generate(self, mobi_file_path: str, real_count: int | None) -> Pages:
- """
+ '''
Given a specified page count (such as from a custom column),
create our array of pages for the apnx file by dividing by
the content size of the book.
- """
+ '''
pages = []
count = 0
diff --git a/src/calibre/devices/kindle/apnx_page_generator/generators/fast_page_generator.py b/src/calibre/devices/kindle/apnx_page_generator/generators/fast_page_generator.py
index 3a4dbce532..9027f4d1bf 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/generators/fast_page_generator.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/generators/fast_page_generator.py
@@ -10,10 +10,10 @@ from calibre.devices.kindle.apnx_page_generator.pages import Pages
class FastPageGenerator(IPageGenerator):
def name(self) -> str:
- return "fast"
+ return 'fast'
def _generate_fallback(self, mobi_file_path: str, real_count: int | None) -> Pages:
- raise Exception("Fast calculation impossible.")
+ raise Exception('Fast calculation impossible.')
def _generate(self, mobi_file_path: str, real_count: int | None) -> Pages:
"""
diff --git a/src/calibre/devices/kindle/apnx_page_generator/generators/pagebreak_page_generator.py b/src/calibre/devices/kindle/apnx_page_generator/generators/pagebreak_page_generator.py
index e71346bd77..433961b1eb 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/generators/pagebreak_page_generator.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/generators/pagebreak_page_generator.py
@@ -12,13 +12,13 @@ from calibre.devices.kindle.apnx_page_generator.pages import Pages
class PagebreakPageGenerator(IPageGenerator):
def name(self) -> str:
- return "pagebreak"
+ return 'pagebreak'
def _generate_fallback(self, mobi_file_path: str, real_count: int | None) -> Pages:
return FastPageGenerator.instance.generate(mobi_file_path, real_count)
def _generate(self, mobi_file_path: str, real_count: int | None) -> Pages:
- """ Determine pages based on the presence of <*pagebreak*/>. """
+ ''' Determine pages based on the presence of <*pagebreak*/>. '''
html = mobi_html(mobi_file_path)
pages = []
for m in re.finditer(b'<[^>]*pagebreak[^>]*>', html):
diff --git a/src/calibre/devices/kindle/apnx_page_generator/i_page_generator.py b/src/calibre/devices/kindle/apnx_page_generator/i_page_generator.py
index 315ce59fe6..5388c96ee9 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/i_page_generator.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/i_page_generator.py
@@ -28,7 +28,7 @@ class IPageGenerator(metaclass=ABCMeta):
return result
return self._generate_fallback(mobi_file_path, real_count)
except Exception as e:
- if self.__class__.__name__ == "FastPageGenerator":
+ if self.__class__.__name__ == 'FastPageGenerator':
raise e
return self._generate_fallback(mobi_file_path, real_count)
@@ -41,7 +41,7 @@ def mobi_html(mobi_file_path: str) -> bytes:
from calibre.ebooks.mobi.reader.mobi6 import MobiReader
mr = MobiReader(mobi_file_path, default_log)
if mr.book_header.encryption_type != 0:
- raise Exception("DRMed book")
+ raise Exception('DRMed book')
mr.extract_text()
return as_bytes(mr.mobi_html.lower())
diff --git a/src/calibre/devices/kindle/apnx_page_generator/page_group.py b/src/calibre/devices/kindle/apnx_page_generator/page_group.py
index e9504a0ad7..1b777756a4 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/page_group.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/page_group.py
@@ -7,7 +7,7 @@ from calibre.devices.kindle.apnx_page_generator.page_number_type import PageNumb
class PageGroup:
- """Simulate constructor overloading"""
+ '''Simulate constructor overloading'''
def __init__(self, page_locations: int | list[int], page_number_type: PageNumberTypes, first_value: int,
page_labels: str | list[str] | None = None):
if page_locations.__class__ is int:
@@ -52,5 +52,5 @@ class PageGroup:
if self.__page_number_type != PageNumberTypes.Custom:
values = str(self.__first_value)
else:
- values = "|".join(self.__page_number_labels)
- return f"({starting_location},{self.__page_number_type.value},{values})"
+ values = '|'.join(self.__page_number_labels)
+ return f'({starting_location},{self.__page_number_type.value},{values})'
diff --git a/src/calibre/devices/kindle/apnx_page_generator/page_number_type.py b/src/calibre/devices/kindle/apnx_page_generator/page_number_type.py
index 4f468ab204..e8c49573b1 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/page_number_type.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/page_number_type.py
@@ -6,6 +6,6 @@ import enum
class PageNumberTypes(enum.Enum):
- Arabic = "a"
- Roman = "r"
- Custom = "c"
+ Arabic = 'a'
+ Roman = 'r'
+ Custom = 'c'
diff --git a/src/calibre/devices/kindle/apnx_page_generator/pages.py b/src/calibre/devices/kindle/apnx_page_generator/pages.py
index 37f4a6528b..6960c856f4 100644
--- a/src/calibre/devices/kindle/apnx_page_generator/pages.py
+++ b/src/calibre/devices/kindle/apnx_page_generator/pages.py
@@ -30,7 +30,7 @@ class Pages:
for group in self.__pages_groups:
result.append(group.get_page_map(location))
location += group.number_of_pages
- return ",".join(result)
+ return ','.join(result)
@property
def page_locations(self) -> list[int]:
diff --git a/src/calibre/devices/kindle/bookmark.py b/src/calibre/devices/kindle/bookmark.py
index b794c963e5..61e8671f01 100644
--- a/src/calibre/devices/kindle/bookmark.py
+++ b/src/calibre/devices/kindle/bookmark.py
@@ -75,14 +75,14 @@ class Bookmark(): # {{{
entry_type = None
rec_len, = unpack('>I', data[eo+4:eo+8])
if rec_len == 0:
- current_block = "empty_data"
- elif data[eo+8:eo+12] == b"EBAR":
- current_block = "data_header"
+ current_block = 'empty_data'
+ elif data[eo+8:eo+12] == b'EBAR':
+ current_block = 'data_header'
# entry_type = "data_header"
location, = unpack('>I', data[eo+0x34:eo+0x38])
# print "data_header location: %d" % location
else:
- current_block = "text_block"
+ current_block = 'text_block'
if previous_block == 'empty_data':
entry_type = 'Note'
elif previous_block == 'data_header':
@@ -149,7 +149,7 @@ class Bookmark(): # {{{
mi = get_topaz_metadata(stream)
my_clippings = self.path
split = my_clippings.find('documents') + len('documents/')
- my_clippings = my_clippings[:split] + "My Clippings.txt"
+ my_clippings = my_clippings[:split] + 'My Clippings.txt'
try:
with open(my_clippings, encoding='utf-8', errors='replace') as f2:
marker_found = 0
@@ -274,7 +274,7 @@ class Bookmark(): # {{{
self.last_read_location = self.last_read - self.pdf_page_offset
else:
- print("unsupported bookmark_extension: %s" % self.bookmark_extension)
+ print('unsupported bookmark_extension: %s' % self.bookmark_extension)
self.user_notes = user_notes
def get_book_length(self):
@@ -303,6 +303,6 @@ class Bookmark(): # {{{
except:
pass
else:
- print("unsupported bookmark_extension: %s" % self.bookmark_extension)
+ print('unsupported bookmark_extension: %s' % self.bookmark_extension)
# }}}
diff --git a/src/calibre/devices/kindle/driver.py b/src/calibre/devices/kindle/driver.py
index 9b022966ac..b5c53e479b 100644
--- a/src/calibre/devices/kindle/driver.py
+++ b/src/calibre/devices/kindle/driver.py
@@ -264,12 +264,12 @@ class KINDLE(USBMS):
# Add the last-read location
if bookmark.book_format == 'pdf':
- markup = _("%(time)s
Last page read: %(loc)d (%(pr)d%%)") % dict(
+ markup = _('%(time)s
Last page read: %(loc)d (%(pr)d%%)') % dict(
time=strftime('%x', timestamp.timetuple()),
loc=last_read_location,
pr=percent_read)
else:
- markup = _("%(time)s
Last page read: Location %(loc)d (%(pr)d%%)") % dict(
+ markup = _('%(time)s
Last page read: Location %(loc)d (%(pr)d%%)') % dict(
time=strftime('%x', timestamp.timetuple()),
loc=last_read_location,
pr=percent_read)
@@ -610,7 +610,7 @@ class KINDLE2(KINDLE):
# Create the sidecar folder if necessary
if (self.sidecar_apnx):
- path = os.path.join(os.path.dirname(filepath), filename+".sdr")
+ path = os.path.join(os.path.dirname(filepath), filename+'.sdr')
if not os.path.exists(path):
os.makedirs(path)
@@ -636,7 +636,7 @@ class KINDLE2(KINDLE):
if temp in self.EXTRA_CUSTOMIZATION_CHOICES[self.OPT_APNX_METHOD]:
method = temp
else:
- print("Invalid method choice for this book (%r), ignoring." % temp)
+ print('Invalid method choice for this book (%r), ignoring.' % temp)
except:
print('Could not retrieve override method choice, using default.')
apnx_builder.write_apnx(filepath, apnx_path, method=method, page_count=custom_page_count)
diff --git a/src/calibre/devices/kobo/bookmark.py b/src/calibre/devices/kobo/bookmark.py
index 75b53c9f06..75c437225d 100644
--- a/src/calibre/devices/kobo/bookmark.py
+++ b/src/calibre/devices/kobo/bookmark.py
@@ -54,7 +54,7 @@ class Bookmark(): # {{{
'ORDER BY bm.ContentID, bm.chapterprogress'
)
- debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapters: contentId={self.contentId}")
+ debug_print(f'Kobo::Bookmark::get_bookmark_data - getting kepub chapters: contentId={self.contentId}')
cursor.execute(kepub_chapter_query, book_query_values)
kepub_chapters = {}
if self.kepub:
@@ -66,9 +66,9 @@ class Bookmark(): # {{{
'chapter_title': chapter_row['Title'],
'chapter_index': chapter_row['VolumeIndex']
}
- debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapter: kepub chapters={kepub_chapters}")
+ debug_print(f'Kobo::Bookmark::get_bookmark_data - getting kepub chapter: kepub chapters={kepub_chapters}')
except:
- debug_print("Kobo::Bookmark::get_bookmark_data - No chapters found")
+ debug_print('Kobo::Bookmark::get_bookmark_data - No chapters found')
cursor.execute(bookmark_query, book_query_values)
@@ -92,7 +92,7 @@ class Bookmark(): # {{{
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter file_contentID_part='{file_contentID_part}'")
# from urllib import quote
# file_contentID_part = quote(file_contentID_part)
- chapter_contentID = book_contentID_part + "!" + opf_reference + "!" + file_contentID_part
+ chapter_contentID = book_contentID_part + '!' + opf_reference + '!' + file_contentID_part
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapter chapter_contentID='{chapter_contentID}'")
kepub_chapter = kepub_chapters.get(chapter_contentID, None)
if kepub_chapter is not None:
@@ -115,7 +115,7 @@ class Bookmark(): # {{{
e_type = 'Bookmark'
text = row['Title']
# highlight is text with no annotation
- elif text is not None and (annotation is None or annotation == ""):
+ elif text is not None and (annotation is None or annotation == ''):
e_type = 'Highlight'
elif text and annotation:
e_type = 'Annotation'
@@ -165,7 +165,7 @@ class Bookmark(): # {{{
A string representation of this object, suitable for printing to
console
'''
- ans = ["Kobo bookmark:"]
+ ans = ['Kobo bookmark:']
def fmt(x, y):
ans.append('%-20s: %s'%(str(x), str(y)))
@@ -181,7 +181,7 @@ class Bookmark(): # {{{
if self.user_notes:
fmt('User Notes', self.user_notes)
- ans = '\n'.join(ans) + "\n"
+ ans = '\n'.join(ans) + '\n'
return ans
diff --git a/src/calibre/devices/kobo/books.py b/src/calibre/devices/kobo/books.py
index 4e4f8b5aac..e8d62a67d2 100644
--- a/src/calibre/devices/kobo/books.py
+++ b/src/calibre/devices/kobo/books.py
@@ -22,13 +22,13 @@ class Book(Book_):
thumbnail_name=None, size=None, other=None):
from calibre.utils.date import parse_date
# debug_print('Book::__init__ - title=', title)
- show_debug = title is not None and title.lower().find("xxxxx") >= 0
+ show_debug = title is not None and title.lower().find('xxxxx') >= 0
if other is not None:
other.title = title
other.published_date = date
if show_debug:
- debug_print("Book::__init__ - title=", title, 'authors=', authors)
- debug_print("Book::__init__ - other=", other)
+ debug_print('Book::__init__ - title=', title, 'authors=', authors)
+ debug_print('Book::__init__ - other=', other)
super().__init__(prefix, lpath, size, other)
if title is not None and len(title) > 0:
@@ -36,7 +36,7 @@ class Book(Book_):
if authors is not None and len(authors) > 0:
self.authors_from_string(authors)
- if self.author_sort is None or self.author_sort == "Unknown":
+ if self.author_sort is None or self.author_sort == 'Unknown':
self.author_sort = author_to_author_sort(authors)
self.mime = mime
@@ -45,13 +45,13 @@ class Book(Book_):
if ContentType == '6' and date is not None:
try:
- self.datetime = time.strptime(date, "%Y-%m-%dT%H:%M:%S.%f")
+ self.datetime = time.strptime(date, '%Y-%m-%dT%H:%M:%S.%f')
except:
try:
- self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%dT%H:%M:%S")
+ self.datetime = time.strptime(date.split('+')[0], '%Y-%m-%dT%H:%M:%S')
except:
try:
- self.datetime = time.strptime(date.split('+')[0], "%Y-%m-%d")
+ self.datetime = time.strptime(date.split('+')[0], '%Y-%m-%d')
except:
try:
self.datetime = parse_date(date,
@@ -77,13 +77,13 @@ class Book(Book_):
self.thumbnail = ImageWrapper(thumbnail_name)
if show_debug:
- debug_print("Book::__init__ end - self=", self)
- debug_print("Book::__init__ end - title=", title, 'authors=', authors)
+ debug_print('Book::__init__ end - self=', self)
+ debug_print('Book::__init__ end - title=', title, 'authors=', authors)
@property
def is_sideloaded(self):
# If we don't have a content Id, we don't know what type it is.
- return self.contentID and self.contentID.startswith("file")
+ return self.contentID and self.contentID.startswith('file')
@property
def has_kobo_series(self):
@@ -91,14 +91,14 @@ class Book(Book_):
@property
def is_purchased_kepub(self):
- return self.contentID and not self.contentID.startswith("file")
+ return self.contentID and not self.contentID.startswith('file')
def __str__(self):
'''
A string representation of this object, suitable for printing to
console
'''
- ans = ["Kobo metadata:"]
+ ans = ['Kobo metadata:']
def fmt(x, y):
ans.append('%-20s: %s'%(str(x), str(y)))
@@ -118,7 +118,7 @@ class Book(Book_):
ans = '\n'.join(ans)
- return super().__str__() + "\n" + ans
+ return super().__str__() + '\n' + ans
class ImageWrapper:
@@ -134,7 +134,7 @@ class KTCollectionsBookList(CollectionsBookList):
self.set_device_managed_collections([])
def get_collections(self, collection_attributes, collections_template=None, template_globals=None):
- debug_print("KTCollectionsBookList:get_collections - start - collection_attributes=", collection_attributes)
+ debug_print('KTCollectionsBookList:get_collections - start - collection_attributes=', collection_attributes)
collections = {}
@@ -142,7 +142,7 @@ class KTCollectionsBookList(CollectionsBookList):
for c in collection_attributes:
ca.append(c.lower())
collection_attributes = ca
- debug_print("KTCollectionsBookList:get_collections - collection_attributes=", collection_attributes)
+ debug_print('KTCollectionsBookList:get_collections - collection_attributes=', collection_attributes)
for book in self:
tsval = book.get('title_sort', book.title)
@@ -151,7 +151,7 @@ class KTCollectionsBookList(CollectionsBookList):
show_debug = self.is_debugging_title(tsval) or tsval is None
if show_debug: # or len(book.device_collections) > 0:
- debug_print('KTCollectionsBookList:get_collections - tsval=', tsval, "book.title=", book.title, "book.title_sort=", book.title_sort)
+ debug_print('KTCollectionsBookList:get_collections - tsval=', tsval, 'book.title=', book.title, 'book.title_sort=', book.title_sort)
debug_print('KTCollectionsBookList:get_collections - book.device_collections=', book.device_collections)
# debug_print(book)
# Make sure we can identify this book via the lpath
@@ -168,7 +168,7 @@ class KTCollectionsBookList(CollectionsBookList):
# book in all existing collections. Do not add any new ones.
attrs = ['device_collections']
if getattr(book, '_new_book', False):
- debug_print("KTCollectionsBookList:get_collections - sending new book")
+ debug_print('KTCollectionsBookList:get_collections - sending new book')
if prefs['manage_device_metadata'] == 'manual':
# Ensure that the book is in all the book's existing
# collections plus all metadata collections
@@ -187,14 +187,14 @@ class KTCollectionsBookList(CollectionsBookList):
if cat_name not in collections:
collections[cat_name] = {}
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - Device Managed Collection:", cat_name)
+ debug_print('KTCollectionsBookList:get_collections - Device Managed Collection:', cat_name)
if lpath not in collections[cat_name]:
collections[cat_name][lpath] = book
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - Device Managed Collection -added book to cat_name", cat_name)
+ debug_print('KTCollectionsBookList:get_collections - Device Managed Collection -added book to cat_name', cat_name)
book.device_collections = []
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - attrs=", attrs)
+ debug_print('KTCollectionsBookList:get_collections - attrs=', attrs)
if collections_template is not None:
attrs.append('%template%')
@@ -212,7 +212,7 @@ class KTCollectionsBookList(CollectionsBookList):
doing_dc = True
val = book.device_collections # is a list
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - adding book.device_collections", book.device_collections)
+ debug_print('KTCollectionsBookList:get_collections - adding book.device_collections', book.device_collections)
elif attr == '%template%':
doing_dc = False
val = ''
@@ -220,7 +220,7 @@ class KTCollectionsBookList(CollectionsBookList):
nv = SafeFormat().safe_format(collections_template, book,
'KOBO', book, global_vars=template_globals)
if show_debug:
- debug_print("KTCollectionsBookList:get_collections collections_template - result", nv)
+ debug_print('KTCollectionsBookList:get_collections collections_template - result', nv)
if nv:
val = [v.strip() for v in nv.split(':@:') if v.strip()]
else:
@@ -228,7 +228,7 @@ class KTCollectionsBookList(CollectionsBookList):
ign, val, orig_val, fm = book.format_field_extended(attr)
val = book.get(attr, None)
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - not device_collections")
+ debug_print('KTCollectionsBookList:get_collections - not device_collections')
debug_print(' ign=', ign, ', val=', val, ' orig_val=', orig_val, 'fm=', fm)
debug_print(' val=', val)
@@ -249,16 +249,16 @@ class KTCollectionsBookList(CollectionsBookList):
else:
val = [orig_val]
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - val is text and multiple", val)
+ debug_print('KTCollectionsBookList:get_collections - val is text and multiple', val)
elif fm is not None and fm['datatype'] == 'composite' and fm['is_multiple']:
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - val is compositeand multiple", val)
+ debug_print('KTCollectionsBookList:get_collections - val is compositeand multiple', val)
val = [v.strip() for v in
val.split(fm['is_multiple']['ui_to_list'])]
else:
val = [val]
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - val=", val)
+ debug_print('KTCollectionsBookList:get_collections - val=', val)
for category in val:
# debug_print("KTCollectionsBookList:get_collections - category=", category)
@@ -282,13 +282,13 @@ class KTCollectionsBookList(CollectionsBookList):
if cat_name not in collections:
collections[cat_name] = {}
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - created collection for cat_name", cat_name)
+ debug_print('KTCollectionsBookList:get_collections - created collection for cat_name', cat_name)
if lpath not in collections[cat_name]:
collections[cat_name][lpath] = book
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - added book to collection for cat_name", cat_name)
+ debug_print('KTCollectionsBookList:get_collections - added book to collection for cat_name', cat_name)
if show_debug:
- debug_print("KTCollectionsBookList:get_collections - cat_name", cat_name)
+ debug_print('KTCollectionsBookList:get_collections - cat_name', cat_name)
# Sort collections
result = {}
@@ -296,7 +296,7 @@ class KTCollectionsBookList(CollectionsBookList):
for category, lpaths in collections.items():
result[category] = lpaths.values()
# debug_print("KTCollectionsBookList:get_collections - result=", result.keys())
- debug_print("KTCollectionsBookList:get_collections - end")
+ debug_print('KTCollectionsBookList:get_collections - end')
return result
def set_device_managed_collections(self, collection_names):
diff --git a/src/calibre/devices/kobo/driver.py b/src/calibre/devices/kobo/driver.py
index d1a7b739ae..44fdde439e 100644
--- a/src/calibre/devices/kobo/driver.py
+++ b/src/calibre/devices/kobo/driver.py
@@ -37,7 +37,7 @@ from polyglot.builtins import iteritems, itervalues, string_or_bytes
EPUB_EXT = '.epub'
KEPUB_EXT = '.kepub'
-KOBO_ROOT_DIR_NAME = ".kobo"
+KOBO_ROOT_DIR_NAME = '.kobo'
DEFAULT_COVER_LETTERBOX_COLOR = '#000000'
@@ -45,11 +45,11 @@ DEFAULT_COVER_LETTERBOX_COLOR = '#000000'
def qhash(inputstr):
- instr = b""
+ instr = b''
if isinstance(inputstr, bytes):
instr = inputstr
elif isinstance(inputstr, str):
- instr = inputstr.encode("utf8")
+ instr = inputstr.encode('utf8')
else:
return -1
@@ -206,15 +206,15 @@ class KOBO(USBMS):
return dbversion
def device_version_info(self):
- debug_print("device_version_info - start")
+ debug_print('device_version_info - start')
if not self._device_version_info:
- version_file = os.path.join(self._main_prefix, KOBO_ROOT_DIR_NAME, "version")
- debug_print(f"device_version_info - version_file={version_file}")
+ version_file = os.path.join(self._main_prefix, KOBO_ROOT_DIR_NAME, 'version')
+ debug_print(f'device_version_info - version_file={version_file}')
if os.path.isfile(version_file):
- debug_print("device_version_info - have opened version_file")
+ debug_print('device_version_info - have opened version_file')
with open(version_file) as vf:
- self._device_version_info = vf.read().strip().split(",")
- debug_print("device_version_info - self._device_version_info=", self._device_version_info)
+ self._device_version_info = vf.read().strip().split(',')
+ debug_print('device_version_info - self._device_version_info=', self._device_version_info)
return self._device_version_info
def device_serial_no(self):
@@ -334,7 +334,7 @@ class KOBO(USBMS):
# print "Image name Normalized: " + imagename
if not os.path.exists(imagename):
- debug_print("Strange - The image name does not exist - title: ", title)
+ debug_print('Strange - The image name does not exist - title: ', title)
if imagename is not None:
bl[idx].thumbnail = ImageWrapper(imagename)
if (ContentType != '6' and MimeType != 'Shortcover'):
@@ -343,7 +343,7 @@ class KOBO(USBMS):
# print 'update_metadata_item returned true'
changed = True
else:
- debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
+ debug_print(' Strange: The file: ', prefix, lpath, ' does not exist!')
if lpath in playlist_map and \
playlist_map[lpath] not in bl[idx].device_collections:
bl[idx].device_collections = playlist_map.get(lpath,[])
@@ -355,13 +355,13 @@ class KOBO(USBMS):
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
else:
- debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
- title = "FILE MISSING: " + title
+ debug_print(' Strange: The file: ', prefix, lpath, ' does not exist!')
+ title = 'FILE MISSING: ' + title
book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
except:
- debug_print("prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
- "mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID)
+ debug_print('prefix: ', prefix, 'lpath: ', lpath, 'title: ', title, 'authors: ', authors,
+ 'mime: ', mime, 'date: ', date, 'ContentType: ', ContentType, 'ImageID: ', ImageID)
raise
# print 'Update booklist'
@@ -377,7 +377,7 @@ class KOBO(USBMS):
with closing(self.device_database_connection(use_row_factory=True)) as connection:
self.dbversion = self.get_database_version(connection)
- debug_print("Database Version: ", self.dbversion)
+ debug_print('Database Version: ', self.dbversion)
cursor = connection.cursor()
opts = self.settings()
@@ -387,7 +387,7 @@ class KOBO(USBMS):
'BookID is Null %(previews)s %(recommendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(
expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')',
previews=' and Accessibility <> 6' if not self.show_previews else '',
- recommendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] is False else '')
+ recommendations=" and IsDownloaded in ('true', 1)" if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] is False else '')
elif self.dbversion >= 16 and self.dbversion < 33:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, "1" as IsDownloaded from content where '
@@ -423,15 +423,15 @@ class KOBO(USBMS):
changed = False
for row in cursor:
# self.report_progress((i+1) / float(numrows), _('Getting list of books on device...'))
- if not hasattr(row['ContentID'], 'startswith') or row['ContentID'].startswith("file:///usr/local/Kobo/help/"):
+ if not hasattr(row['ContentID'], 'startswith') or row['ContentID'].startswith('file:///usr/local/Kobo/help/'):
# These are internal to the Kobo device and do not exist
continue
path = self.path_from_contentid(row['ContentID'], row['ContentType'], row['MimeType'], oncard)
mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/epub+zip'
# debug_print("mime:", mime)
- if oncard != 'carda' and oncard != 'cardb' and not row['ContentID'].startswith("file:///mnt/sd/"):
+ if oncard != 'carda' and oncard != 'cardb' and not row['ContentID'].startswith('file:///mnt/sd/'):
prefix = self._main_prefix
- elif oncard == 'carda' and row['ContentID'].startswith("file:///mnt/sd/"):
+ elif oncard == 'carda' and row['ContentID'].startswith('file:///mnt/sd/'):
prefix = self._card_a_prefix
changed = update_booklist(self._main_prefix, path,
row['Title'], row['Attribution'], mime, row['DateCreated'], row['ContentType'],
@@ -514,26 +514,26 @@ class KOBO(USBMS):
cursor.execute('delete from content where BookID = ?', t)
if ContentType == 6:
try:
- cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 '
+ cursor.execute("update content set ReadStatus=0, FirstTimeReading = 'true', ___PercentRead=0, ___ExpirationStatus=3 "
'where BookID is Null and ContentID =?',t)
except Exception as e:
if 'no such column' not in str(e):
raise
try:
- cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0 '
+ cursor.execute("update content set ReadStatus=0, FirstTimeReading = 'true', ___PercentRead=0 "
'where BookID is Null and ContentID =?',t)
except Exception as e:
if 'no such column' not in str(e):
raise
- cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\' '
+ cursor.execute("update content set ReadStatus=0, FirstTimeReading = 'true' "
'where BookID is Null and ContentID =?',t)
else:
cursor.execute('delete from content where BookID is Null and ContentID =?',t)
cursor.close()
if ImageID is None:
- print("Error condition ImageID was not found")
- print("You likely tried to delete a book that the kobo has not yet added to the database")
+ print('Error condition ImageID was not found')
+ print('You likely tried to delete a book that the kobo has not yet added to the database')
# If all this succeeds we need to delete the images files via the ImageID
return ImageID
@@ -555,7 +555,7 @@ class KOBO(USBMS):
os.unlink(fpath)
def delete_books(self, paths, end_session=True):
- if self.modify_database_check("delete_books") is False:
+ if self.modify_database_check('delete_books') is False:
return
for i, path in enumerate(paths):
@@ -594,7 +594,7 @@ class KOBO(USBMS):
self.report_progress(1.0, _('Removing books from device...'))
def remove_books_from_metadata(self, paths, booklists):
- if self.modify_database_check("remove_books_from_metatata") is False:
+ if self.modify_database_check('remove_books_from_metatata') is False:
return
for i, path in enumerate(paths):
@@ -608,12 +608,12 @@ class KOBO(USBMS):
self.report_progress(1.0, _('Removing books from device metadata listing...'))
def add_books_to_metadata(self, locations, metadata, booklists):
- debug_print("KoboTouch::add_books_to_metadata - start. metadata=%s" % metadata[0])
+ debug_print('KoboTouch::add_books_to_metadata - start. metadata=%s' % metadata[0])
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
info = next(metadata)
- debug_print("KoboTouch::add_books_to_metadata - info=%s" % info)
+ debug_print('KoboTouch::add_books_to_metadata - info=%s' % info)
blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
# Extract the correct prefix from the pathname. To do this correctly,
@@ -645,7 +645,7 @@ class KOBO(USBMS):
book.size = os.stat(self.normalize_path(path)).st_size
b = booklists[blist].add_book(book, replace_metadata=True)
if b:
- debug_print("KoboTouch::add_books_to_metadata - have a new book - book=%s" % book)
+ debug_print('KoboTouch::add_books_to_metadata - have a new book - book=%s' % book)
b._new_book = True
self.report_progress(1.0, _('Adding books to device metadata listing...'))
@@ -664,15 +664,15 @@ class KOBO(USBMS):
ContentID = ContentID.replace(self._card_a_prefix, '')
elif ContentType == 999: # HTML Files
ContentID = path
- ContentID = ContentID.replace(self._main_prefix, "/mnt/onboard/")
+ ContentID = ContentID.replace(self._main_prefix, '/mnt/onboard/')
if self._card_a_prefix is not None:
- ContentID = ContentID.replace(self._card_a_prefix, "/mnt/sd/")
+ ContentID = ContentID.replace(self._card_a_prefix, '/mnt/sd/')
else: # ContentType = 16
ContentID = path
- ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
+ ContentID = ContentID.replace(self._main_prefix, 'file:///mnt/onboard/')
if self._card_a_prefix is not None:
- ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
- ContentID = ContentID.replace("\\", '/')
+ ContentID = ContentID.replace(self._card_a_prefix, 'file:///mnt/sd/')
+ ContentID = ContentID.replace('\\', '/')
return ContentID
def get_content_type_from_path(self, path):
@@ -707,28 +707,28 @@ class KOBO(USBMS):
if oncard == 'cardb':
print('path from_contentid cardb')
elif oncard == 'carda':
- path = path.replace("file:///mnt/sd/", self._card_a_prefix)
+ path = path.replace('file:///mnt/sd/', self._card_a_prefix)
# print "SD Card: " + path
else:
- if ContentType == "6" and MimeType == 'Shortcover':
+ if ContentType == '6' and MimeType == 'Shortcover':
# This is a hack as the kobo files do not exist
# but the path is required to make a unique id
# for calibre's reference
path = self._main_prefix + path + '.kobo'
# print "Path: " + path
- elif (ContentType == "6" or ContentType == "10") and (
+ elif (ContentType == '6' or ContentType == '10') and (
MimeType == 'application/x-kobo-epub+zip' or (
MimeType == 'application/epub+zip' and self.isTolinoDevice())
):
- if path.startswith("file:///mnt/onboard/"):
- path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
+ if path.startswith('file:///mnt/onboard/'):
+ path = self._main_prefix + path.replace('file:///mnt/onboard/', '')
else:
path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
# print "Internal: " + path
else:
# if path.startswith("file:///mnt/onboard/"):
- path = path.replace("file:///mnt/onboard/", self._main_prefix)
- path = path.replace("/mnt/onboard/", self._main_prefix)
+ path = path.replace('file:///mnt/onboard/', self._main_prefix)
+ path = path.replace('/mnt/onboard/', self._main_prefix)
# print "Internal: " + path
return path
@@ -743,7 +743,7 @@ class KOBO(USBMS):
debug_print('The database has been upgraded past supported version')
self.report_progress(1.0, _('Removing books from device...'))
from calibre.devices.errors import UserFeedback
- raise UserFeedback(_("Kobo database version unsupported - See details"),
+ raise UserFeedback(_('Kobo database version unsupported - See details'),
_('Your Kobo is running an updated firmware/database version.'
' As calibre does not know about this updated firmware,'
' database editing is disabled, to prevent corruption.'
@@ -773,7 +773,7 @@ class KOBO(USBMS):
extension = os.path.splitext(tpath)[1]
if extension == '.kobo':
from calibre.devices.errors import UserFeedback
- raise UserFeedback(_("Not Implemented"),
+ raise UserFeedback(_('Not Implemented'),
_('".kobo" files do not exist on the device as books; '
'instead they are rows in the sqlite database. '
'Currently they cannot be exported or viewed.'),
@@ -817,9 +817,9 @@ class KOBO(USBMS):
# Reset Im_Reading list in the database
if oncard == 'carda':
- query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
+ query= "update content set ReadStatus=0, FirstTimeReading = 'true' where BookID is Null and ContentID like 'file:///mnt/sd/%'"
elif oncard != 'carda' and oncard != 'cardb':
- query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
+ query= "update content set ReadStatus=0, FirstTimeReading = 'true' where BookID is Null and ContentID not like 'file:///mnt/sd/%'"
try:
cursor.execute(query)
@@ -830,7 +830,7 @@ class KOBO(USBMS):
cursor.close()
def set_readstatus(self, connection, ContentID, ReadStatus):
- debug_print("Kobo::set_readstatus - ContentID=%s, ReadStatus=%d" % (ContentID, ReadStatus))
+ debug_print('Kobo::set_readstatus - ContentID=%s, ReadStatus=%d' % (ContentID, ReadStatus))
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select DateLastRead, ReadStatus from Content where BookID is Null and ContentID = ?', t)
@@ -851,8 +851,8 @@ class KOBO(USBMS):
t = (ReadStatus, datelastread, ContentID,)
try:
- debug_print("Kobo::set_readstatus - Making change - ContentID=%s, ReadStatus=%d, DateLastRead=%s" % (ContentID, ReadStatus, datelastread))
- cursor.execute('update content set ReadStatus=?,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
+ debug_print('Kobo::set_readstatus - Making change - ContentID=%s, ReadStatus=%d, DateLastRead=%s' % (ContentID, ReadStatus, datelastread))
+ cursor.execute("update content set ReadStatus=?,FirstTimeReading='false',DateLastRead=? where BookID is Null and ContentID = ?", t)
except:
debug_print(' Database Exception: Unable to update ReadStatus')
raise
@@ -862,9 +862,9 @@ class KOBO(USBMS):
def reset_favouritesindex(self, connection, oncard):
# Reset FavouritesIndex list in the database
if oncard == 'carda':
- query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
+ query= "update content set FavouritesIndex=-1 where BookID is Null and ContentID like 'file:///mnt/sd/%'"
elif oncard != 'carda' and oncard != 'cardb':
- query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
+ query= "update content set FavouritesIndex=-1 where BookID is Null and ContentID not like 'file:///mnt/sd/%'"
cursor = connection.cursor()
try:
@@ -892,28 +892,28 @@ class KOBO(USBMS):
def update_device_database_collections(self, booklists, collections_attributes, oncard):
debug_print("Kobo:update_device_database_collections - oncard='%s'"%oncard)
- if self.modify_database_check("update_device_database_collections") is False:
+ if self.modify_database_check('update_device_database_collections') is False:
return
# Only process categories in this list
supportedcategories = {
- "Im_Reading":1,
- "Read":2,
- "Closed":3,
- "Shortlist":4,
+ 'Im_Reading':1,
+ 'Read':2,
+ 'Closed':3,
+ 'Shortlist':4,
# "Preview":99, # Unsupported as we don't want to change it
}
# Define lists for the ReadStatus
readstatuslist = {
- "Im_Reading":1,
- "Read":2,
- "Closed":3,
+ 'Im_Reading':1,
+ 'Read':2,
+ 'Closed':3,
}
accessibilitylist = {
- "Preview":6,
- "Recommendation":4,
+ 'Preview':6,
+ 'Recommendation':4,
}
# debug_print('Starting update_device_database_collections', collections_attributes)
@@ -964,10 +964,10 @@ class KOBO(USBMS):
pass
else: # No collections
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
- debug_print("No Collections - resetting ReadStatus")
+ debug_print('No Collections - resetting ReadStatus')
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14:
- debug_print("No Collections - resetting FavouritesIndex")
+ debug_print('No Collections - resetting FavouritesIndex')
self.reset_favouritesindex(connection, oncard)
# debug_print('Finished update_device_database_collections', collections_attributes)
@@ -1076,7 +1076,7 @@ class KOBO(USBMS):
# debug_print("ImageId: ", result[0])
ImageID = result[0]
except StopIteration:
- debug_print("No rows exist in the database - cannot upload")
+ debug_print('No rows exist in the database - cannot upload')
return
finally:
cursor.close()
@@ -1111,7 +1111,7 @@ class KOBO(USBMS):
fsync(f)
else:
- debug_print("ImageID could not be retrieved from the database")
+ debug_print('ImageID could not be retrieved from the database')
def prepare_addable_books(self, paths):
'''
@@ -1234,7 +1234,7 @@ class KOBO(USBMS):
extension = os.path.splitext(path_map[book_id])[1]
ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(path_map[book_id])
ContentID = self.contentid_from_path(path_map[book_id], ContentType)
- debug_print("get_annotations - ContentID: ", ContentID, "ContentType: ", ContentType)
+ debug_print('get_annotations - ContentID: ', ContentID, 'ContentType: ', ContentType)
bookmark_ext = extension
@@ -1252,21 +1252,21 @@ class KOBO(USBMS):
# last_read_location = bookmark.last_read_location
# timestamp = bookmark.timestamp
percent_read = bookmark.percent_read
- debug_print("Kobo::generate_annotation_html - last_read: ", bookmark.last_read)
+ debug_print('Kobo::generate_annotation_html - last_read: ', bookmark.last_read)
if bookmark.last_read is not None:
try:
- last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S"))))
+ last_read = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(calendar.timegm(time.strptime(bookmark.last_read, '%Y-%m-%dT%H:%M:%S'))))
except:
try:
- last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S.%f"))))
+ last_read = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(calendar.timegm(time.strptime(bookmark.last_read, '%Y-%m-%dT%H:%M:%S.%f'))))
except:
try:
- last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%SZ"))))
+ last_read = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(calendar.timegm(time.strptime(bookmark.last_read, '%Y-%m-%dT%H:%M:%SZ'))))
except:
- last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
+ last_read = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
else:
# self.datetime = time.gmtime()
- last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
+ last_read = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
# debug_print("Percent read: ", percent_read)
ka_soup = BeautifulSoup()
@@ -1276,12 +1276,12 @@ class KOBO(USBMS):
# Add the last-read location
if bookmark.book_format == 'epub':
-            markup = _("<hr /><b>Book last read:</b> %(time)s<br /><b>Percentage read:</b> %(pr)d%%<hr />") % dict(
+            markup = _('<hr /><b>Book last read:</b> %(time)s<br /><b>Percentage read:</b> %(pr)d%%<hr />') % dict(
time=last_read,
# loc=last_read_location,
pr=percent_read)
else:
-            markup = _("<hr /><b>Book last read:</b> %(time)s<br /><b>Percentage read:</b> %(pr)d%%<hr />") % dict(
+            markup = _('<hr /><b>Book last read:</b> %(time)s<br /><b>Percentage read:</b> %(pr)d%%<hr />') % dict(
time=last_read,
# loc=last_read_location,
pr=percent_read)
@@ -1307,7 +1307,7 @@ class KOBO(USBMS):
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
- annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else ""))
+ annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else ''))
elif user_notes[location]['type'] == 'Highlight':
annotations.append(
                    _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br />'
@@ -1358,7 +1358,7 @@ class KOBO(USBMS):
if bm.type == 'kobo_bookmark' and bm.value.last_read:
mi = db.get_metadata(db_id, index_is_id=True)
- debug_print("KOBO:add_annotation_to_library - Title: ", mi.title)
+ debug_print('KOBO:add_annotation_to_library - Title: ', mi.title)
user_notes_soup = self.generate_annotation_html(bm.value)
if mi.comments:
                a_offset = mi.comments.find('<div class="user_annotations">')
@@ -1465,7 +1465,7 @@ class KOBOTOUCH(KOBO):
opts = None
- TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ"
+ TIMESTAMP_STRING = '%Y-%m-%dT%H:%M:%SZ'
AURA_PRODUCT_ID = [0x4203]
AURA_EDITION2_PRODUCT_ID = [0x4226]
@@ -1693,7 +1693,7 @@ class KOBOTOUCH(KOBO):
return dummy_bl
elif oncard and oncard != 'carda' and oncard != 'cardb':
self.report_progress(1.0, _('Getting list of books on device...'))
- debug_print("KoboTouch:books - unknown card")
+ debug_print('KoboTouch:books - unknown card')
return dummy_bl
prefix = self._card_a_prefix if oncard == 'carda' else \
@@ -1714,15 +1714,15 @@ class KOBOTOUCH(KOBO):
bl = self.booklist_class(oncard, prefix, self.settings)
opts = self.settings()
- debug_print("KoboTouch:books - opts.extra_customization=", opts.extra_customization)
- debug_print("KoboTouch:books - driver options=", self)
+ debug_print('KoboTouch:books - opts.extra_customization=', opts.extra_customization)
+ debug_print('KoboTouch:books - driver options=', self)
debug_print("KoboTouch:books - prefs['manage_device_metadata']=", prefs['manage_device_metadata'])
debugging_title = self.debugging_title
debug_print("KoboTouch:books - set_debugging_title to '%s'" % debugging_title)
bl.set_debugging_title(debugging_title)
- debug_print("KoboTouch:books - length bl=%d"%len(bl))
+ debug_print('KoboTouch:books - length bl=%d'%len(bl))
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
- debug_print("KoboTouch:books - length bl after sync=%d"%len(bl))
+ debug_print('KoboTouch:books - length bl after sync=%d'%len(bl))
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
@@ -1739,9 +1739,9 @@ class KOBOTOUCH(KOBO):
show_debug = self.is_debugging_title(title)
# show_debug = authors == 'L. Frank Baum'
if show_debug:
- debug_print("KoboTouch:update_booklist - title='%s'"%title, "ContentType=%s"%ContentType, "isdownloaded=", isdownloaded)
+ debug_print("KoboTouch:update_booklist - title='%s'"%title, 'ContentType=%s'%ContentType, 'isdownloaded=', isdownloaded)
debug_print(
- " prefix=%s, DateCreated=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s"%
+ ' prefix=%s, DateCreated=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s'%
(prefix, DateCreated, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded,))
changed = False
try:
@@ -1786,7 +1786,7 @@ class KOBOTOUCH(KOBO):
playlist_map[lpath].append('Deleted')
allow_shelves = False
if show_debug:
- debug_print("KoboTouch:update_booklist - have a deleted book")
+ debug_print('KoboTouch:update_booklist - have a deleted book')
elif self.supports_kobo_archive() and (accessibility == 1 or accessibility == 2):
playlist_map[lpath].append('Archived')
allow_shelves = True
@@ -1823,7 +1823,7 @@ class KOBOTOUCH(KOBO):
# print "Normalized FileName: " + path
# Collect the Kobo metadata
- authors_list = [a.strip() for a in authors.split("&")] if authors is not None else [_('Unknown')]
+ authors_list = [a.strip() for a in authors.split('&')] if authors is not None else [_('Unknown')]
kobo_metadata = Metadata(title, authors_list)
kobo_metadata.series = series
kobo_metadata.series_index = seriesnumber
@@ -1836,7 +1836,7 @@ class KOBOTOUCH(KOBO):
kobo_metadata.pubdate = parse_date(DateCreated, assume_utc=True)
except:
try:
- kobo_metadata.pubdate = datetime.strptime(DateCreated, "%Y-%m-%dT%H:%M:%S.%fZ")
+ kobo_metadata.pubdate = datetime.strptime(DateCreated, '%Y-%m-%dT%H:%M:%S.%fZ')
except:
debug_print("KoboTouch:update_booklist - Cannot convert date - DateCreated='%s'"%DateCreated)
@@ -1844,8 +1844,8 @@ class KOBOTOUCH(KOBO):
if idx is not None: # and not (accessibility == 1 and isdownloaded == 'false'):
if show_debug:
self.debug_index = idx
- debug_print("KoboTouch:update_booklist - idx=%d"%idx)
- debug_print("KoboTouch:update_booklist - lpath=%s"%lpath)
+ debug_print('KoboTouch:update_booklist - idx=%d'%idx)
+ debug_print('KoboTouch:update_booklist - lpath=%s'%lpath)
debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections)
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
debug_print('KoboTouch:update_booklist - bookshelves=', bookshelves)
@@ -1867,8 +1867,8 @@ class KOBOTOUCH(KOBO):
# debug_print("KoboTouch:update_booklist - update_metadata_item returned true")
changed = True
else:
- debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
- debug_print("KoboTouch:update_booklist - book size=", bl[idx].size)
+ debug_print(' Strange: The file: ', prefix, lpath, ' does not exist!')
+ debug_print('KoboTouch:update_booklist - book size=', bl[idx].size)
if show_debug:
debug_print("KoboTouch:update_booklist - ContentID='%s'"%ContentID)
@@ -1897,8 +1897,8 @@ class KOBOTOUCH(KOBO):
debug_print('KoboTouch:update_booklist - updated bl[idx].device_collections=', bl[idx].device_collections)
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map, 'changed=', changed)
# debug_print('KoboTouch:update_booklist - book=', bl[idx])
- debug_print("KoboTouch:update_booklist - book class=%s"%bl[idx].__class__)
- debug_print("KoboTouch:update_booklist - book title=%s"%bl[idx].title)
+ debug_print('KoboTouch:update_booklist - book class=%s'%bl[idx].__class__)
+ debug_print('KoboTouch:update_booklist - book title=%s'%bl[idx].title)
else:
if show_debug:
debug_print('KoboTouch:update_booklist - idx is none')
@@ -1907,16 +1907,16 @@ class KOBOTOUCH(KOBO):
book = self.book_from_path(prefix, lpath, title, authors, MimeType, DateCreated, ContentType, ImageID)
else:
if isdownloaded == 'true': # A recommendation or preview is OK to not have a file
- debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
- title = "FILE MISSING: " + title
+ debug_print(' Strange: The file: ', prefix, lpath, ' does not exist!')
+ title = 'FILE MISSING: ' + title
book = self.book_class(prefix, lpath, title, authors, MimeType, DateCreated, ContentType, ImageID, size=0)
if show_debug:
debug_print('KoboTouch:update_booklist - book file does not exist. ContentID="%s"'%ContentID)
except Exception as e:
debug_print("KoboTouch:update_booklist - exception creating book: '%s'"%str(e))
- debug_print(" prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
- "MimeType: ", MimeType, "DateCreated: ", DateCreated, "ContentType: ", ContentType, "ImageID: ", ImageID)
+ debug_print(' prefix: ', prefix, 'lpath: ', lpath, 'title: ', title, 'authors: ', authors,
+ 'MimeType: ', MimeType, 'DateCreated: ', DateCreated, 'ContentType: ', ContentType, 'ImageID: ', ImageID)
raise
if show_debug:
@@ -1924,10 +1924,10 @@ class KOBOTOUCH(KOBO):
# debug_print(' resolution:', book.__class__.__mro__)
debug_print(" contentid: '%s'"%book.contentID)
debug_print(" title:'%s'"%book.title)
- debug_print(" the book:", book)
+ debug_print(' the book:', book)
debug_print(" author_sort:'%s'"%book.author_sort)
- debug_print(" bookshelves:", bookshelves)
- debug_print(" kobo_collections:", kobo_collections)
+ debug_print(' bookshelves:', bookshelves)
+ debug_print(' kobo_collections:', kobo_collections)
# print 'Update booklist'
book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
@@ -1966,11 +1966,11 @@ class KOBOTOUCH(KOBO):
return bookshelves
cursor = connection.cursor()
- query = "select ShelfName " \
- "from ShelfContent " \
- "where ContentId = ? " \
- f"and _IsDeleted = {self.bool_for_query(False)} " \
- "and ShelfName is not null" # This should never be null, but it is protection against an error cause by a sync to the Kobo server
+ query = 'select ShelfName ' \
+ 'from ShelfContent ' \
+ 'where ContentId = ? ' \
+ f'and _IsDeleted = {self.bool_for_query(False)} ' \
+ 'and ShelfName is not null' # This should never be null, but it is protection against an error cause by a sync to the Kobo server
values = (ContentID, )
cursor.execute(query, values)
for i, row in enumerate(cursor):
@@ -1983,13 +1983,13 @@ class KOBOTOUCH(KOBO):
self.debug_index = 0
with closing(self.device_database_connection(use_row_factory=True)) as connection:
- debug_print("KoboTouch:books - reading device database")
+ debug_print('KoboTouch:books - reading device database')
self.dbversion = self.get_database_version(connection)
- debug_print("Database Version: ", self.dbversion)
+ debug_print('Database Version: ', self.dbversion)
self.bookshelvelist = self.get_bookshelflist(connection)
- debug_print("KoboTouch:books - shelf list:", self.bookshelvelist)
+ debug_print('KoboTouch:books - shelf list:', self.bookshelvelist)
columns = 'Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ImageId, ReadStatus, Description, Publisher '
if self.dbversion >= 16:
@@ -2005,15 +2005,15 @@ class KOBOTOUCH(KOBO):
else:
columns += ', NULL AS ISBN'
if self.supports_series():
- columns += ", Series, SeriesNumber, ___UserID, ExternalId, Subtitle"
+ columns += ', Series, SeriesNumber, ___UserID, ExternalId, Subtitle'
else:
columns += ', null as Series, null as SeriesNumber, ___UserID, null as ExternalId, null as Subtitle'
if self.supports_series_list:
- columns += ", SeriesID, SeriesNumberFloat"
+ columns += ', SeriesID, SeriesNumberFloat'
else:
columns += ', null as SeriesID, null as SeriesNumberFloat'
if self.supports_bookstats:
- columns += ", StorePages, StoreWordCount, StoreTimeToReadLowerEstimate, StoreTimeToReadUpperEstimate"
+ columns += ', StorePages, StoreWordCount, StoreTimeToReadLowerEstimate, StoreTimeToReadUpperEstimate'
else:
columns += ', null as StorePages, null as StoreWordCount, null as StoreTimeToReadLowerEstimate, null as StoreTimeToReadUpperEstimate'
@@ -2025,10 +2025,10 @@ class KOBOTOUCH(KOBO):
" %(previews)s %(recommendations)s ) " # Previews or Recommendations
) % \
dict(
- expiry="" if self.show_archived_books else "and IsDownloaded in ('true', 1)",
- previews=" OR (Accessibility in (6) AND ___UserID <> '')" if self.show_previews else "",
- recommendations=" OR (Accessibility IN (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else "",
- downloaded_accessibility="1,2,8,9" if self.supports_overdrive() else "1,2"
+ expiry='' if self.show_archived_books else "and IsDownloaded in ('true', 1)",
+ previews=" OR (Accessibility in (6) AND ___UserID <> '')" if self.show_previews else '',
+ recommendations=" OR (Accessibility IN (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else '',
+ downloaded_accessibility='1,2,8,9' if self.supports_overdrive() else '1,2'
)
elif self.supports_series():
where_clause = (" WHERE BookID IS NULL "
@@ -2036,9 +2036,9 @@ class KOBOTOUCH(KOBO):
" AND NOT ((___ExpirationStatus=3 OR ___ExpirationStatus is Null) %(expiry)s)"
) % \
dict(
- expiry=" AND ContentType = 6" if self.show_archived_books else "",
- previews=" or (Accessibility IN (6) AND ___UserID <> '')" if self.show_previews else "",
- recommendations=" or (Accessibility in (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else ""
+ expiry=' AND ContentType = 6' if self.show_archived_books else '',
+ previews=" or (Accessibility IN (6) AND ___UserID <> '')" if self.show_previews else '',
+ recommendations=" or (Accessibility in (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else ''
)
elif self.dbversion >= 33:
where_clause = (' WHERE BookID IS NULL %(previews)s %(recommendations)s AND NOT'
@@ -2047,7 +2047,7 @@ class KOBOTOUCH(KOBO):
dict(
expiry=' AND ContentType = 6' if self.show_archived_books else '',
previews=' AND Accessibility <> 6' if not self.show_previews else '',
- recommendations=' AND IsDownloaded IN (\'true\', 1)' if not self.show_recommendations else ''
+ recommendations=" AND IsDownloaded IN ('true', 1)" if not self.show_recommendations else ''
)
elif self.dbversion >= 16:
where_clause = (' WHERE BookID IS NULL '
@@ -2068,7 +2068,7 @@ class KOBOTOUCH(KOBO):
card_condition = " AND contentId LIKE 'file:///mnt/sd/%'" if oncard == 'carda' else " AND contentId NOT LIKE'file:///mnt/sd/%'"
query = 'SELECT ' + columns + ' FROM content ' + where_clause + card_condition
- debug_print("KoboTouch:books - query=", query)
+ debug_print('KoboTouch:books - query=', query)
cursor = connection.cursor()
try:
@@ -2093,17 +2093,17 @@ class KOBOTOUCH(KOBO):
# self.report_progress((i) / float(books_on_device), _('Getting list of books on device...'))
show_debug = self.is_debugging_title(row['Title'])
if show_debug:
- debug_print("KoboTouch:books - looping on database - row=%d" % i)
- debug_print("KoboTouch:books - title='%s'"%row['Title'], "authors=", row['Attribution'])
- debug_print("KoboTouch:books - row=", row)
+ debug_print('KoboTouch:books - looping on database - row=%d' % i)
+ debug_print("KoboTouch:books - title='%s'"%row['Title'], 'authors=', row['Attribution'])
+ debug_print('KoboTouch:books - row=', row)
if not hasattr(row['ContentID'], 'startswith') or row['ContentID'].lower().startswith(
- "file:///usr/local/kobo/help/") or row['ContentID'].lower().startswith("/usr/local/kobo/help/"):
+ 'file:///usr/local/kobo/help/') or row['ContentID'].lower().startswith('/usr/local/kobo/help/'):
# These are internal to the Kobo device and do not exist
continue
externalId = None if row['ExternalId'] and len(row['ExternalId']) == 0 else row['ExternalId']
path = self.path_from_contentid(row['ContentID'], row['ContentType'], row['MimeType'], oncard, externalId)
if show_debug:
- debug_print("KoboTouch:books - path='%s'"%path, " ContentID='%s'"%row['ContentID'], " externalId=%s" % externalId)
+ debug_print("KoboTouch:books - path='%s'"%path, " ContentID='%s'"%row['ContentID'], ' externalId=%s' % externalId)
bookshelves = get_bookshelvesforbook(connection, row['ContentID'])
@@ -2131,8 +2131,8 @@ class KOBOTOUCH(KOBO):
if not prefs['manage_device_metadata'] == 'on_connect':
self.dump_bookshelves(connection)
else:
- debug_print("KoboTouch:books - automatically managing metadata")
- debug_print("KoboTouch:books - self.kobo_series_dict=", self.kobo_series_dict)
+ debug_print('KoboTouch:books - automatically managing metadata')
+ debug_print('KoboTouch:books - self.kobo_series_dict=', self.kobo_series_dict)
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
@@ -2149,14 +2149,14 @@ class KOBOTOUCH(KOBO):
# Bypassing the KOBO sync_booklists as that does things we don't need to do
# Also forcing sync to see if this solves issues with updating shelves and matching books.
if need_sync or True: # self.count_found_in_bl != len(bl) or need_sync:
- debug_print("KoboTouch:books - about to sync_booklists")
+ debug_print('KoboTouch:books - about to sync_booklists')
if oncard == 'cardb':
USBMS.sync_booklists(self, (None, None, bl))
elif oncard == 'carda':
USBMS.sync_booklists(self, (None, bl, None))
else:
USBMS.sync_booklists(self, (bl, None, None))
- debug_print("KoboTouch:books - have done sync_booklists")
+ debug_print('KoboTouch:books - have done sync_booklists')
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - end - oncard='%s'"%oncard)
@@ -2164,7 +2164,7 @@ class KOBOTOUCH(KOBO):
@classmethod
def book_from_path(cls, prefix, lpath, title, authors, mime, date, ContentType, ImageID):
- debug_print("KoboTouch:book_from_path - title=%s"%title)
+ debug_print('KoboTouch:book_from_path - title=%s'%title)
book = super().book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
# Kobo Audiobooks are directories with files in them.
@@ -2177,7 +2177,7 @@ class KOBOTOUCH(KOBO):
size = audiofile.stat().st_size
# debug_print("KoboTouch:book_from_path - size=", size)
book.size += size
- debug_print("KoboTouch:book_from_path - book.size=", book.size)
+ debug_print('KoboTouch:book_from_path - book.size=', book.size)
return book
@@ -2190,24 +2190,24 @@ class KOBOTOUCH(KOBO):
if oncard == 'cardb':
print('path from_contentid cardb')
else:
- if (ContentType == "6" or ContentType == "10"):
+ if (ContentType == '6' or ContentType == '10'):
if (MimeType == 'application/octet-stream'): # Audiobooks purchased from Kobo are in a different location.
path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/audiobook/' + path
elif (MimeType == 'audio/mpeg' and self.isTolinoDevice()):
path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/audiobook/' + path
- elif path.startswith("file:///mnt/onboard/"):
- path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
- elif path.startswith("file:///mnt/sd/"):
- path = self._card_a_prefix + path.replace("file:///mnt/sd/", '')
+ elif path.startswith('file:///mnt/onboard/'):
+ path = self._main_prefix + path.replace('file:///mnt/onboard/', '')
+ elif path.startswith('file:///mnt/sd/'):
+ path = self._card_a_prefix + path.replace('file:///mnt/sd/', '')
elif externalId:
path = self._card_a_prefix + 'koboExtStorage/kepub/' + path
else:
path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
else: # Should never get here, but, just in case...
# if path.startswith("file:///mnt/onboard/"):
- path = path.replace("file:///mnt/onboard/", self._main_prefix)
- path = path.replace("file:///mnt/sd/", self._card_a_prefix)
- path = path.replace("/mnt/onboard/", self._main_prefix)
+ path = path.replace('file:///mnt/onboard/', self._main_prefix)
+ path = path.replace('file:///mnt/sd/', self._card_a_prefix)
+ path = path.replace('/mnt/onboard/', self._main_prefix)
# print "Internal: " + path
return path
@@ -2222,11 +2222,11 @@ class KOBOTOUCH(KOBO):
fpath = path + ending
if os.path.exists(fpath):
if show_debug:
- debug_print("KoboTouch:imagefilename_from_imageID - have cover image fpath=%s" % (fpath))
+ debug_print('KoboTouch:imagefilename_from_imageID - have cover image fpath=%s' % (fpath))
return fpath
if show_debug:
- debug_print("KoboTouch:imagefilename_from_imageID - no cover image found - ImageID=%s" % (ImageID))
+ debug_print('KoboTouch:imagefilename_from_imageID - no cover image found - ImageID=%s' % (ImageID))
return None
def get_extra_css(self):
@@ -2239,13 +2239,13 @@ class KOBOTOUCH(KOBO):
from css_parser import parseFile as cssparseFile
try:
extra_sheet = cssparseFile(extra_css_path)
- debug_print(f"KoboTouch:get_extra_css: Using extra CSS in {extra_css_path} ({len(extra_sheet.cssRules)} rules)")
+ debug_print(f'KoboTouch:get_extra_css: Using extra CSS in {extra_css_path} ({len(extra_sheet.cssRules)} rules)')
if len(extra_sheet.cssRules) ==0:
- debug_print("KoboTouch:get_extra_css: Extra CSS file has no valid rules. CSS will not be modified.")
+ debug_print('KoboTouch:get_extra_css: Extra CSS file has no valid rules. CSS will not be modified.')
extra_sheet = None
except Exception as e:
- debug_print(f"KoboTouch:get_extra_css: Problem parsing extra CSS file {extra_css_path}")
- debug_print(f"KoboTouch:get_extra_css: Exception {e}")
+ debug_print(f'KoboTouch:get_extra_css: Problem parsing extra CSS file {extra_css_path}')
+ debug_print(f'KoboTouch:get_extra_css: Exception {e}')
# create dictionary of features enabled in kobo extra css
self.extra_css_options = {}
@@ -2276,9 +2276,9 @@ class KOBOTOUCH(KOBO):
self.extra_sheet = self.get_extra_css()
i = 0
for file, n, mi in zip(files, names, metadata):
- debug_print("KoboTouch:upload_books: Processing book: {} by {}".format(mi.title, " and ".join(mi.authors)))
- debug_print(f"KoboTouch:upload_books: file={file}, name={n}")
- self.report_progress(i / float(len(files)), "Processing book: {} by {}".format(mi.title, " and ".join(mi.authors)))
+ debug_print('KoboTouch:upload_books: Processing book: {} by {}'.format(mi.title, ' and '.join(mi.authors)))
+ debug_print(f'KoboTouch:upload_books: file={file}, name={n}')
+ self.report_progress(i / float(len(files)), 'Processing book: {} by {}'.format(mi.title, ' and '.join(mi.authors)))
mi.kte_calibre_name = n
self._modify_epub(file, mi)
i += 1
@@ -2292,7 +2292,7 @@ class KOBOTOUCH(KOBO):
try:
with closing(self.device_database_connection()) as connection:
cursor = connection.cursor()
- cleanup_query = f"DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = {self.bool_for_query(False)}"
+ cleanup_query = f'DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = {self.bool_for_query(False)}'
for fname, cycle in result:
show_debug = self.is_debugging_title(fname)
contentID = self.contentid_from_path(fname, 6)
@@ -2318,11 +2318,11 @@ class KOBOTOUCH(KOBO):
return result
def _modify_epub(self, book_file, metadata, container=None):
- debug_print(f"KoboTouch:_modify_epub:Processing {metadata.author_sort} - {metadata.title}")
+ debug_print(f'KoboTouch:_modify_epub:Processing {metadata.author_sort} - {metadata.title}')
# Currently only modifying CSS, so if no stylesheet, don't do anything
if not self.extra_sheet:
- debug_print("KoboTouch:_modify_epub: no CSS file")
+ debug_print('KoboTouch:_modify_epub: no CSS file')
return True
container, commit_container = self.create_container(book_file, metadata, container)
@@ -2339,14 +2339,14 @@ class KOBOTOUCH(KOBO):
# future css mods may be epub/kepub specific, so pass file extension arg
fileext = os.path.splitext(book_file)[-1].lower()
- debug_print(f"KoboTouch:_modify_epub: Modifying {cssname}")
+ debug_print(f'KoboTouch:_modify_epub: Modifying {cssname}')
if self._modify_stylesheet(newsheet, fileext):
- debug_print(f"KoboTouch:_modify_epub:CSS rules {oldrules} -> {len(newsheet.cssRules)} ({cssname})")
+ debug_print(f'KoboTouch:_modify_epub:CSS rules {oldrules} -> {len(newsheet.cssRules)} ({cssname})')
container.dirty(cssname)
is_dirty = True
if commit_container:
- debug_print("KoboTouch:_modify_epub: committing container.")
+ debug_print('KoboTouch:_modify_epub: committing container.')
self.commit_container(container, is_dirty)
return True
@@ -2361,7 +2361,7 @@ class KOBOTOUCH(KOBO):
if self.extra_css_options.get('has_atpage', False):
page_rules = self.get_extra_css_rules(sheet, CSSRule.PAGE_RULE)
if len(page_rules) > 0:
- debug_print("KoboTouch:_modify_stylesheet: Removing existing @page rules")
+ debug_print('KoboTouch:_modify_stylesheet: Removing existing @page rules')
for rule in page_rules:
rule.style = ''
is_dirty = True
@@ -2371,14 +2371,14 @@ class KOBOTOUCH(KOBO):
if self.extra_css_options.get('has_widows_orphans', False):
widow_orphan_rules = self.get_extra_css_rules_widow_orphan(sheet)
if len(widow_orphan_rules) > 0:
- debug_print("KoboTouch:_modify_stylesheet: Removing existing widows/orphans attribs")
+ debug_print('KoboTouch:_modify_stylesheet: Removing existing widows/orphans attribs')
for rule in widow_orphan_rules:
rule.style.removeProperty('widows')
rule.style.removeProperty('orphans')
is_dirty = True
# append all rules from kobo extra css
- debug_print("KoboTouch:_modify_stylesheet: Append all kobo extra css rules")
+ debug_print('KoboTouch:_modify_stylesheet: Append all kobo extra css rules')
for extra_rule in self.extra_sheet.cssRules:
sheet.insertRule(extra_rule)
is_dirty = True
@@ -2391,25 +2391,25 @@ class KOBOTOUCH(KOBO):
commit_container = True
try:
from calibre.ebooks.oeb.polish.container import get_container
- debug_print("KoboTouch:create_container: try to create new container")
+ debug_print('KoboTouch:create_container: try to create new container')
container = get_container(book_file)
container.css_preprocessor = DummyCSSPreProcessor()
except Exception as e:
- debug_print(f"KoboTouch:create_container: exception from get_container {metadata.author_sort} - {metadata.title}")
- debug_print(f"KoboTouch:create_container: exception is: {e}")
+ debug_print(f'KoboTouch:create_container: exception from get_container {metadata.author_sort} - {metadata.title}')
+ debug_print(f'KoboTouch:create_container: exception is: {e}')
else:
commit_container = False
- debug_print("KoboTouch:create_container: received container")
+ debug_print('KoboTouch:create_container: received container')
return container, commit_container
def commit_container(self, container, is_dirty=True):
# commit container if changes have been made
if is_dirty:
- debug_print("KoboTouch:commit_container: commit container.")
+ debug_print('KoboTouch:commit_container: commit container.')
container.commit()
# Clean-up-AYGO prevents build-up of TEMP exploded epub/kepub files
- debug_print("KoboTouch:commit_container: removing container temp files.")
+ debug_print('KoboTouch:commit_container: removing container temp files.')
try:
shutil.rmtree(container.root)
except Exception:
@@ -2466,18 +2466,18 @@ class KOBOTOUCH(KOBO):
return imageId
def delete_images(self, ImageID, book_path):
- debug_print("KoboTouch:delete_images - ImageID=", ImageID)
+ debug_print('KoboTouch:delete_images - ImageID=', ImageID)
if ImageID is not None:
path = self.images_path(book_path, ImageID)
- debug_print("KoboTouch:delete_images - path=%s" % path)
+ debug_print('KoboTouch:delete_images - path=%s' % path)
for ending in self.cover_file_endings().keys():
fpath = path + ending
fpath = self.normalize_path(fpath)
- debug_print("KoboTouch:delete_images - fpath=%s" % fpath)
+ debug_print('KoboTouch:delete_images - fpath=%s' % fpath)
if os.path.exists(fpath):
- debug_print("KoboTouch:delete_images - Image File Exists")
+ debug_print('KoboTouch:delete_images - Image File Exists')
os.unlink(fpath)
try:
@@ -2501,20 +2501,20 @@ class KOBOTOUCH(KOBO):
ContentID = ContentID.replace(self._main_prefix + self.normalize_path(KOBO_ROOT_DIR_NAME + '/kepub/'), '')
else:
ContentID = path
- ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
+ ContentID = ContentID.replace(self._main_prefix, 'file:///mnt/onboard/')
if show_debug:
debug_print("KoboTouch:contentid_from_path - 1 ContentID='%s'"%ContentID)
if self._card_a_prefix is not None:
- ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
+ ContentID = ContentID.replace(self._card_a_prefix, 'file:///mnt/sd/')
else: # ContentType = 16
debug_print("KoboTouch:contentid_from_path ContentType other than 6 - ContentType='%d'"%ContentType, "path='%s'"%path)
ContentID = path
- ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
+ ContentID = ContentID.replace(self._main_prefix, 'file:///mnt/onboard/')
if self._card_a_prefix is not None:
- ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
- ContentID = ContentID.replace("\\", '/')
+ ContentID = ContentID.replace(self._card_a_prefix, 'file:///mnt/sd/')
+ ContentID = ContentID.replace('\\', '/')
if show_debug:
debug_print("KoboTouch:contentid_from_path - end - ContentID='%s'"%ContentID)
return ContentID
@@ -2526,7 +2526,7 @@ class KOBOTOUCH(KOBO):
return ContentType
def get_content_type_from_extension(self, extension):
- debug_print("KoboTouch:get_content_type_from_extension - start")
+ debug_print('KoboTouch:get_content_type_from_extension - start')
# With new firmware, ContentType appears to be 6 for all types of sideloaded books.
ContentType = 6
if self.fwversion < (1,9,17):
@@ -2540,30 +2540,30 @@ class KOBOTOUCH(KOBO):
def update_device_database_collections(self, booklists, collections_attributes, oncard):
debug_print("KoboTouch:update_device_database_collections - oncard='%s'"%oncard)
debug_print("KoboTouch:update_device_database_collections - device='%s'" % self)
- if self.modify_database_check("update_device_database_collections") is False:
+ if self.modify_database_check('update_device_database_collections') is False:
return
# Only process categories in this list
supportedcategories = {
- "Im_Reading": 1,
- "Read": 2,
- "Closed": 3,
- "Shortlist": 4,
- "Archived": 5,
+ 'Im_Reading': 1,
+ 'Read': 2,
+ 'Closed': 3,
+ 'Shortlist': 4,
+ 'Archived': 5,
}
# Define lists for the ReadStatus
readstatuslist = {
- "Im_Reading":1,
- "Read":2,
- "Closed":3,
+ 'Im_Reading':1,
+ 'Read':2,
+ 'Closed':3,
}
accessibilitylist = {
- "Deleted":1,
- "OverDrive":9,
- "Preview":6,
- "Recommendation":4,
+ 'Deleted':1,
+ 'OverDrive':9,
+ 'Preview':6,
+ 'Recommendation':4,
}
# debug_print('KoboTouch:update_device_database_collections - collections_attributes=', collections_attributes)
@@ -2604,10 +2604,10 @@ class KOBOTOUCH(KOBO):
# Need to reset the collections outside the particular loops
# otherwise the last item will not be removed
if self.dbversion < 53:
- debug_print("KoboTouch:update_device_database_collections - calling reset_readstatus")
+ debug_print('KoboTouch:update_device_database_collections - calling reset_readstatus')
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
- debug_print("KoboTouch:update_device_database_collections - calling reset_favouritesindex")
+ debug_print('KoboTouch:update_device_database_collections - calling reset_favouritesindex')
self.reset_favouritesindex(connection, oncard)
# debug_print("KoboTouch:update_device_database_collections - length collections=", len(collections))
@@ -2681,17 +2681,17 @@ class KOBOTOUCH(KOBO):
elif have_bookshelf_attributes: # No collections but have set the shelf option
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
- debug_print("No Collections - resetting ReadStatus")
+ debug_print('No Collections - resetting ReadStatus')
if self.dbversion < 53:
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
- debug_print("No Collections - resetting FavouritesIndex")
+ debug_print('No Collections - resetting FavouritesIndex')
self.reset_favouritesindex(connection, oncard)
# Set the series info and cleanup the bookshelves only if the firmware supports them and the user has set the options.
if (self.supports_bookshelves and self.manage_collections or self.supports_series()) and (
have_bookshelf_attributes or update_series_details or update_core_metadata):
- debug_print("KoboTouch:update_device_database_collections - managing bookshelves and series.")
+ debug_print('KoboTouch:update_device_database_collections - managing bookshelves and series.')
self.series_set = 0
self.core_metadata_set = 0
@@ -2702,29 +2702,29 @@ class KOBOTOUCH(KOBO):
books_in_library += 1
show_debug = self.is_debugging_title(book.title)
if show_debug:
- debug_print("KoboTouch:update_device_database_collections - book.title=%s" % book.title)
+ debug_print('KoboTouch:update_device_database_collections - book.title=%s' % book.title)
debug_print(
- "KoboTouch:update_device_database_collections - contentId=%s,"
- "update_core_metadata=%s,update_purchased_kepubs=%s, book.is_sideloaded=%s" % (
+ 'KoboTouch:update_device_database_collections - contentId=%s,'
+ 'update_core_metadata=%s,update_purchased_kepubs=%s, book.is_sideloaded=%s' % (
book.contentID, update_core_metadata, update_purchased_kepubs, book.is_sideloaded))
if update_core_metadata and (update_purchased_kepubs or book.is_sideloaded):
if show_debug:
- debug_print("KoboTouch:update_device_database_collections - calling set_core_metadata")
+ debug_print('KoboTouch:update_device_database_collections - calling set_core_metadata')
self.set_core_metadata(connection, book)
elif update_series_details:
if show_debug:
- debug_print("KoboTouch:update_device_database_collections - calling set_core_metadata - series only")
+ debug_print('KoboTouch:update_device_database_collections - calling set_core_metadata - series only')
self.set_core_metadata(connection, book, series_only=True)
if self.manage_collections and have_bookshelf_attributes:
if show_debug:
- debug_print("KoboTouch:update_device_database_collections - about to remove a book from shelves book.title=%s" % book.title)
+ debug_print('KoboTouch:update_device_database_collections - about to remove a book from shelves book.title=%s' % book.title)
self.remove_book_from_device_bookshelves(connection, book)
book.device_collections.extend(book.kobo_collections)
if not prefs['manage_device_metadata'] == 'manual' and delete_empty_collections:
- debug_print("KoboTouch:update_device_database_collections - about to clear empty bookshelves")
+ debug_print('KoboTouch:update_device_database_collections - about to clear empty bookshelves')
self.delete_empty_bookshelves(connection)
- debug_print("KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d" % (self.series_set, books_in_library))
- debug_print("KoboTouch:update_device_database_collections - Number of core metadata set=%d Number of books=%d" % (
+ debug_print('KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d' % (self.series_set, books_in_library))
+ debug_print('KoboTouch:update_device_database_collections - Number of core metadata set=%d Number of books=%d' % (
self.core_metadata_set, books_in_library))
self.dump_bookshelves(connection)
@@ -2732,7 +2732,7 @@ class KOBOTOUCH(KOBO):
debug_print('KoboTouch:update_device_database_collections - Finished ')
def rebuild_collections(self, booklist, oncard):
- debug_print("KoboTouch:rebuild_collections")
+ debug_print('KoboTouch:rebuild_collections')
collections_attributes = self.get_collections_attributes()
debug_print('KoboTouch:rebuild_collections: collection fields:', collections_attributes)
@@ -2793,7 +2793,7 @@ class KOBOTOUCH(KOBO):
hash1 = qhash(imageId)
dir1 = hash1 & (0xff * 1)
dir2 = (hash1 & (0xff00 * 1)) >> 8
- path = os.path.join(path, "%s" % dir1, "%s" % dir2)
+ path = os.path.join(path, '%s' % dir1, '%s' % dir2)
if imageId:
path = os.path.join(path, imageId)
@@ -2854,7 +2854,7 @@ class KOBOTOUCH(KOBO):
from calibre.utils.img import save_cover_data_to
data = save_cover_data_to(
cover_data, resize_to=resize_to, compression_quality=quality, minify_to=minify_to, grayscale=upload_grayscale, eink=dithered_covers,
- letterbox=letterbox, data_fmt="png" if png_covers else "jpeg", letterbox_color=letterbox_color)
+ letterbox=letterbox, data_fmt='png' if png_covers else 'jpeg', letterbox_color=letterbox_color)
return data
def _upload_cover(
@@ -2876,7 +2876,7 @@ class KOBOTOUCH(KOBO):
cover = self.normalize_path(metadata.cover.replace('/', os.sep))
if not os.path.exists(cover):
- debug_print("KoboTouch:_upload_cover - Cover file does not exist in library")
+ debug_print('KoboTouch:_upload_cover - Cover file does not exist in library')
return
# Get ContentID for Selected Book
@@ -2903,7 +2903,7 @@ class KOBOTOUCH(KOBO):
path = self.images_path(path, ImageID)
if show_debug:
- debug_print("KoboTouch:_upload_cover - About to loop over cover endings")
+ debug_print('KoboTouch:_upload_cover - About to loop over cover endings')
image_dir = os.path.dirname(os.path.abspath(path))
if not os.path.exists(image_dir):
@@ -2919,7 +2919,7 @@ class KOBOTOUCH(KOBO):
for ending, cover_options in self.cover_file_endings().items():
kobo_size, min_dbversion, max_dbversion, is_full_size = cover_options
if show_debug:
- debug_print("KoboTouch:_upload_cover - library_cover_size=%s -> kobo_size=%s, min_dbversion=%d max_dbversion=%d, is_full_size=%s" % (
+ debug_print('KoboTouch:_upload_cover - library_cover_size=%s -> kobo_size=%s, min_dbversion=%d max_dbversion=%d, is_full_size=%s' % (
library_cover_size, kobo_size, min_dbversion, max_dbversion, is_full_size))
if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
@@ -2943,8 +2943,8 @@ class KOBOTOUCH(KOBO):
resize_to, expand_to = self._calculate_kobo_cover_size(library_cover_size, kobo_size, not is_full_size, keep_cover_aspect, letterbox)
if show_debug:
debug_print(
- "KoboTouch:_calculate_kobo_cover_size - expand_to=%s"
- " (vs. kobo_size=%s) & resize_to=%s, keep_cover_aspect=%s & letterbox_fs_covers=%s, png_covers=%s" % (
+ 'KoboTouch:_calculate_kobo_cover_size - expand_to=%s'
+ ' (vs. kobo_size=%s) & resize_to=%s, keep_cover_aspect=%s & letterbox_fs_covers=%s, png_covers=%s' % (
expand_to, kobo_size, resize_to, keep_cover_aspect, letterbox_fs_covers, png_covers))
# NOTE: To speed things up, we enforce a lower
@@ -2983,7 +2983,7 @@ class KOBOTOUCH(KOBO):
fsync(f)
except Exception as e:
err = str(e)
- debug_print("KoboTouch:_upload_cover - Exception string: %s"%err)
+ debug_print('KoboTouch:_upload_cover - Exception string: %s'%err)
raise
def remove_book_from_device_bookshelves(self, connection, book):
@@ -3061,7 +3061,7 @@ class KOBOTOUCH(KOBO):
# debug_print("KoboTouch:set_filesize_in_device_database - end")
def delete_empty_bookshelves(self, connection):
- debug_print("KoboTouch:delete_empty_bookshelves - start")
+ debug_print('KoboTouch:delete_empty_bookshelves - start')
ignore_collections_placeholder = ''
ignore_collections_values = []
@@ -3069,8 +3069,8 @@ class KOBOTOUCH(KOBO):
placeholder = ',?'
ignore_collections_placeholder = ''.join(placeholder for unused in self.ignore_collections_names)
ignore_collections_values.extend(self.ignore_collections_names)
- debug_print("KoboTouch:delete_empty_bookshelves - ignore_collections_in=", ignore_collections_placeholder)
- debug_print("KoboTouch:delete_empty_bookshelves - ignore_collections=", ignore_collections_values)
+ debug_print('KoboTouch:delete_empty_bookshelves - ignore_collections_in=', ignore_collections_placeholder)
+ debug_print('KoboTouch:delete_empty_bookshelves - ignore_collections=', ignore_collections_values)
true, false = self.bool_for_query(True), self.bool_for_query(False)
delete_query = ("DELETE FROM Shelf "
@@ -3081,7 +3081,7 @@ class KOBOTOUCH(KOBO):
"(SELECT 1 FROM ShelfContent c "
"WHERE Shelf.Name = c.ShelfName "
f"AND c._IsDeleted <> {true})")
- debug_print("KoboTouch:delete_empty_bookshelves - delete_query=", delete_query)
+ debug_print('KoboTouch:delete_empty_bookshelves - delete_query=', delete_query)
update_query = ("UPDATE Shelf "
f"SET _IsDeleted = {true} "
@@ -3092,7 +3092,7 @@ class KOBOTOUCH(KOBO):
"(SELECT 1 FROM ShelfContent c "
"WHERE Shelf.Name = c.ShelfName "
f"AND c._IsDeleted <> {true})")
- debug_print("KoboTouch:delete_empty_bookshelves - update_query=", update_query)
+ debug_print('KoboTouch:delete_empty_bookshelves - update_query=', update_query)
delete_activity_query = ("DELETE FROM Activity "
"WHERE Type = 'Shelf' "
@@ -3101,7 +3101,7 @@ class KOBOTOUCH(KOBO):
"WHERE Shelf.Name = Activity.Id "
f"AND Shelf._IsDeleted = {false})"
)
- debug_print("KoboTouch:delete_empty_bookshelves - delete_activity_query=", delete_activity_query)
+ debug_print('KoboTouch:delete_empty_bookshelves - delete_activity_query=', delete_activity_query)
cursor = connection.cursor()
cursor.execute(delete_query, ignore_collections_values)
@@ -3110,7 +3110,7 @@ class KOBOTOUCH(KOBO):
cursor.execute(delete_activity_query)
cursor.close()
- debug_print("KoboTouch:delete_empty_bookshelves - end")
+ debug_print('KoboTouch:delete_empty_bookshelves - end')
def get_bookshelflist(self, connection):
        # Retrieve the list of bookshelves
@@ -3193,11 +3193,11 @@ class KOBOTOUCH(KOBO):
bookshelf_name,
time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
bookshelf_name,
- "false",
- "true",
- "false",
+ 'false',
+ 'true',
+ 'false',
)
- shelf_type = "UserTag" # if self.supports_reading_list else None
+ shelf_type = 'UserTag' # if self.supports_reading_list else None
if self.dbversion < 64:
addquery += ' ("CreationDate","InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced")'\
' VALUES (?, ?, ?, ?, ?, ?, ?)'
@@ -3246,9 +3246,9 @@ class KOBOTOUCH(KOBO):
values.append(ContentID)
else:
if oncard == 'carda':
- query += ' WHERE ContentID like \'file:///mnt/sd/%\''
+ query += " WHERE ContentID like 'file:///mnt/sd/%'"
elif oncard != 'carda' and oncard != 'cardb':
- query += ' WHERE ContentID not like \'file:///mnt/sd/%\''
+ query += " WHERE ContentID not like 'file:///mnt/sd/%'"
if bookshelves:
placeholder = '?'
@@ -3261,7 +3261,7 @@ class KOBOTOUCH(KOBO):
cursor.execute(query, values)
cursor.close()
- debug_print("KoboTouch:remove_from_bookshelf - end")
+ debug_print('KoboTouch:remove_from_bookshelf - end')
# No longer used, but keep for a little bit.
def set_series(self, connection, book):
@@ -3289,7 +3289,7 @@ class KOBOTOUCH(KOBO):
elif book.series_index is None: # This should never happen, but...
update_values = (book.series, None, book.contentID, )
else:
- update_values = (book.series, "%g"%book.series_index, book.contentID, )
+ update_values = (book.series, '%g'%book.series_index, book.contentID, )
cursor = connection.cursor()
try:
@@ -3304,7 +3304,7 @@ class KOBOTOUCH(KOBO):
cursor.close()
if show_debug:
- debug_print("KoboTouch:set_series - end")
+ debug_print('KoboTouch:set_series - end')
def set_core_metadata(self, connection, book, series_only=False):
# debug_print('KoboTouch:set_core_metadata book="%s"' % book.title)
@@ -3319,9 +3319,9 @@ class KOBOTOUCH(KOBO):
new_value = None
else:
new_value = new_value if len(new_value.strip()) else None
- if new_value is not None and new_value.startswith("PLUGBOARD TEMPLATE ERROR"):
+ if new_value is not None and new_value.startswith('PLUGBOARD TEMPLATE ERROR'):
debug_print("KoboTouch:generate_update_from_template template error - template='%s'" % template)
- debug_print("KoboTouch:generate_update_from_template - new_value=", new_value)
+ debug_print('KoboTouch:generate_update_from_template - new_value=', new_value)
# debug_print(
# f"KoboTouch:generate_update_from_template - {book.title} - column_name='{column_name}',"
@@ -3366,7 +3366,7 @@ class KOBOTOUCH(KOBO):
if newmi.series is not None:
new_series = newmi.series
try:
- new_series_number = "%g" % newmi.series_index
+ new_series_number = '%g' % newmi.series_index
except:
new_series_number = None
else:
@@ -3462,9 +3462,9 @@ class KOBOTOUCH(KOBO):
new_subtitle = None
else:
new_subtitle = book.subtitle if len(book.subtitle.strip()) else None
- if new_subtitle is not None and new_subtitle.startswith("PLUGBOARD TEMPLATE ERROR"):
+ if new_subtitle is not None and new_subtitle.startswith('PLUGBOARD TEMPLATE ERROR'):
debug_print("KoboTouch:set_core_metadata subtitle template error - self.subtitle_template='%s'" % self.subtitle_template)
- debug_print("KoboTouch:set_core_metadata - new_subtitle=", new_subtitle)
+ debug_print('KoboTouch:set_core_metadata - new_subtitle=', new_subtitle)
if (new_subtitle is not None and (book.kobo_subtitle is None or book.subtitle != book.kobo_subtitle)) or \
(new_subtitle is None and book.kobo_subtitle is not None):
@@ -3531,7 +3531,7 @@ class KOBOTOUCH(KOBO):
cursor.close()
if show_debug:
- debug_print("KoboTouch:set_core_metadata - end")
+ debug_print('KoboTouch:set_core_metadata - end')
@classmethod
def config_widget(cls):
@@ -3556,7 +3556,7 @@ class KOBOTOUCH(KOBO):
try:
return getattr(cls.opts, key)
except:
- debug_print("KoboTouch::get_prefs - probably an extra_customization:", key)
+ debug_print('KoboTouch::get_prefs - probably an extra_customization:', key)
return None
@classmethod
@@ -4068,7 +4068,7 @@ class KOBOTOUCH(KOBO):
debug_print('The database has been upgraded past supported version')
self.report_progress(1.0, _('Removing books from device...'))
from calibre.devices.errors import UserFeedback
- raise UserFeedback(_("Kobo database version unsupported - See details"),
+ raise UserFeedback(_('Kobo database version unsupported - See details'),
_('Your Kobo is running an updated firmware/database version.'
' As calibre does not know about this updated firmware,'
' database editing is disabled, to prevent corruption.'
@@ -4105,14 +4105,14 @@ class KOBOTOUCH(KOBO):
def is_supported_fwversion(self):
    # Starting with firmware version 3.19.x, the last number appears to be a
# build number. It can be safely ignored when testing the firmware version.
- debug_print("KoboTouch::is_supported_fwversion - self.fwversion[:2]", self.fwversion[:2])
+ debug_print('KoboTouch::is_supported_fwversion - self.fwversion[:2]', self.fwversion[:2])
return self.fwversion[:2] > self.max_supported_fwversion
@classmethod
def migrate_old_settings(cls, settings):
- debug_print("KoboTouch::migrate_old_settings - start")
- debug_print("KoboTouch::migrate_old_settings - settings.extra_customization=", settings.extra_customization)
- debug_print("KoboTouch::migrate_old_settings - For class=", cls.name)
+ debug_print('KoboTouch::migrate_old_settings - start')
+ debug_print('KoboTouch::migrate_old_settings - settings.extra_customization=', settings.extra_customization)
+ debug_print('KoboTouch::migrate_old_settings - For class=', cls.name)
count_options = 0
OPT_COLLECTIONS = count_options
@@ -4146,11 +4146,11 @@ class KOBOTOUCH(KOBO):
# the total number of options.
if cls == KOBOTOUCH or len(settings.extra_customization) >= count_options:
config = cls._config()
- debug_print("KoboTouch::migrate_old_settings - config.preferences=", config.preferences)
- debug_print("KoboTouch::migrate_old_settings - settings need to be migrated")
+ debug_print('KoboTouch::migrate_old_settings - config.preferences=', config.preferences)
+ debug_print('KoboTouch::migrate_old_settings - settings need to be migrated')
settings.manage_collections = True
settings.collections_columns = settings.extra_customization[OPT_COLLECTIONS]
- debug_print("KoboTouch::migrate_old_settings - settings.collections_columns=", settings.collections_columns)
+ debug_print('KoboTouch::migrate_old_settings - settings.collections_columns=', settings.collections_columns)
settings.create_collections = settings.extra_customization[OPT_CREATE_BOOKSHELVES]
settings.delete_empty_collections = settings.extra_customization[OPT_DELETE_BOOKSHELVES]
@@ -4183,7 +4183,7 @@ class KOBOTOUCH(KOBO):
debugging_title = settings.extra_customization[OPT_SUPPORT_NEWER_FIRMWARE]
start_subclass_extra_options = OPT_SUPPORT_NEWER_FIRMWARE + 1
else:
- debug_print("KoboTouch::migrate_old_settings - Have all options")
+ debug_print('KoboTouch::migrate_old_settings - Have all options')
settings.update_series = settings.extra_customization[OPT_UPDATE_SERIES_DETAILS]
settings.modify_css = settings.extra_customization[OPT_MODIFY_CSS]
settings.support_newer_firmware = settings.extra_customization[OPT_SUPPORT_NEWER_FIRMWARE]
@@ -4230,9 +4230,9 @@ class KOBOTOUCH(KOBO):
prints(placeholders%row)
i += 1
if i == 0:
- prints("No shelves found!!")
+ prints('No shelves found!!')
else:
- prints("Number of shelves=%d"%i)
+ prints('Number of shelves=%d'%i)
prints('\nBooks on shelves on device:')
cursor.execute(shelfcontent_query)
@@ -4242,16 +4242,16 @@ class KOBOTOUCH(KOBO):
prints(placeholders%row)
i += 1
if i == 0:
- prints("No books are on any shelves!!")
+ prints('No books are on any shelves!!')
else:
- prints("Number of shelved books=%d"%i)
+ prints('Number of shelved books=%d'%i)
cursor.close()
debug_print('KoboTouch:dump_bookshelves - end')
def __str__(self, *args, **kwargs):
options = ', '.join([f'{x.name}: {self.get_pref(x.name)}' for x in self._config().preferences])
- return f"Driver:{self.name}, Options - {options}"
+ return f'Driver:{self.name}, Options - {options}'
if __name__ == '__main__':
@@ -4265,8 +4265,8 @@ if __name__ == '__main__':
devs = scanner.devices
# debug_print("unit test: devs.__class__=", devs.__class__)
# debug_print("unit test: devs.__class__=", devs.__class__.__name__)
- debug_print("unit test: devs=", devs)
- debug_print("unit test: dev=", dev)
+ debug_print('unit test: devs=', devs)
+ debug_print('unit test: dev=', dev)
# cd = dev.detect_managed_devices(devs)
# if cd is None:
# raise ValueError('Failed to detect KOBOTOUCH device')
diff --git a/src/calibre/devices/kobo/kobotouch_config.py b/src/calibre/devices/kobo/kobotouch_config.py
index eb3a7459b9..179e2b18bd 100644
--- a/src/calibre/devices/kobo/kobotouch_config.py
+++ b/src/calibre/devices/kobo/kobotouch_config.py
@@ -52,7 +52,7 @@ class KOBOTOUCHConfig(TabbedDeviceConfig):
self.tab1 = Tab1Config(self, self.device)
self.tab2 = Tab2Config(self, self.device)
- self.addDeviceTab(self.tab1, _("Collections, covers && uploads"))
+ self.addDeviceTab(self.tab1, _('Collections, covers && uploads'))
self.addDeviceTab(self.tab2, _('Metadata, on device && advanced'))
def get_pref(self, key):
@@ -92,7 +92,7 @@ class KOBOTOUCHConfig(TabbedDeviceConfig):
return self.tab2.metadata_options
def commit(self):
- debug_print("KOBOTOUCHConfig::commit: start")
+ debug_print('KOBOTOUCHConfig::commit: start')
p = super().commit()
p['manage_collections'] = self.manage_collections
@@ -195,14 +195,14 @@ class BookUploadsGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super().__init__(parent, device)
- self.setTitle(_("Uploading of books"))
+ self.setTitle(_('Uploading of books'))
self.options_layout = QGridLayout()
- self.options_layout.setObjectName("options_layout")
+ self.options_layout.setObjectName('options_layout')
self.setLayout(self.options_layout)
self.modify_css_checkbox = create_checkbox(
- _("Modify CSS"),
+ _('Modify CSS'),
_('This allows addition of user CSS rules and removal of some CSS. '
'When sending a book, the driver adds the contents of {0} to all stylesheets in the EPUB. '
'This file is searched for in the root folder of the main memory of the device. '
@@ -211,7 +211,7 @@ class BookUploadsGroupBox(DeviceOptionsGroupBox):
device.get_pref('modify_css')
)
self.override_kobo_replace_existing_checkbox = create_checkbox(
- _("Do not treat replacements as new books"),
+ _('Do not treat replacements as new books'),
_('When a new book is side-loaded, the Kobo firmware imports details of the book into the internal database. '
'Even if the book is a replacement for an existing book, the Kobo will remove the book from the database and then treat it as a new book. '
'This means that the reading status, bookmarks and collections for the book will be lost. '
@@ -237,10 +237,10 @@ class CollectionsGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super().__init__(parent, device)
- self.setTitle(_("Collections"))
+ self.setTitle(_('Collections'))
self.options_layout = QGridLayout()
- self.options_layout.setObjectName("options_layout")
+ self.options_layout.setObjectName('options_layout')
self.setLayout(self.options_layout)
self.setCheckable(True)
@@ -248,7 +248,7 @@ class CollectionsGroupBox(DeviceOptionsGroupBox):
self.setToolTip(wrap_msg(_('Create new collections on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.')))
self.use_collections_columns_checkbox = create_checkbox(
- _("Collections columns:"),
+ _('Collections columns:'),
_('Use a column to generate collections.'),
device.get_pref('use_collections_columns')
)
@@ -259,7 +259,7 @@ class CollectionsGroupBox(DeviceOptionsGroupBox):
self.collections_columns_edit.setText(device.get_pref('collections_columns'))
self.use_collections_template_checkbox = create_checkbox(
- _("Collections template:"),
+ _('Collections template:'),
_('Use a template to generate collections.'),
device.get_pref('use_collections_template')
)
@@ -272,7 +272,7 @@ class CollectionsGroupBox(DeviceOptionsGroupBox):
)
self.create_collections_checkbox = create_checkbox(
- _("Create collections"),
+ _('Create collections'),
_('Create new collections on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.'),
device.get_pref('create_collections')
)
@@ -346,10 +346,10 @@ class CoversGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super().__init__(parent, device)
- self.setTitle(_("Upload covers"))
+ self.setTitle(_('Upload covers'))
self.options_layout = QGridLayout()
- self.options_layout.setObjectName("options_layout")
+ self.options_layout.setObjectName('options_layout')
self.setLayout(self.options_layout)
self.setCheckable(True)
@@ -465,14 +465,14 @@ class DeviceListGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super().__init__(parent, device)
- self.setTitle(_("Show as on device"))
+ self.setTitle(_('Show as on device'))
self.options_layout = QGridLayout()
- self.options_layout.setObjectName("options_layout")
+ self.options_layout.setObjectName('options_layout')
self.setLayout(self.options_layout)
self.show_recommendations_checkbox = create_checkbox(
- _("Show recommendations"),
+ _('Show recommendations'),
_('Kobo shows recommendations on the device. In some cases these have '
'files but in other cases they are just pointers to the web site to buy. '
'Enable if you wish to see/delete them.'),
@@ -480,7 +480,7 @@ class DeviceListGroupBox(DeviceOptionsGroupBox):
)
self.show_archived_books_checkbox = create_checkbox(
- _("Show archived books"),
+ _('Show archived books'),
_('Archived books are listed on the device but need to be downloaded to read.'
' Use this option to show these books and match them with books in the calibre library.'),
device.get_pref('show_archived_books')
@@ -514,15 +514,15 @@ class DeviceListGroupBox(DeviceOptionsGroupBox):
class AdvancedGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
- super().__init__(parent, device, _("Advanced options"))
+ super().__init__(parent, device, _('Advanced options'))
# self.setTitle(_("Advanced Options"))
self.options_layout = QGridLayout()
- self.options_layout.setObjectName("options_layout")
+ self.options_layout.setObjectName('options_layout')
self.setLayout(self.options_layout)
self.support_newer_firmware_checkbox = create_checkbox(
- _("Attempt to support newer firmware"),
+ _('Attempt to support newer firmware'),
_('Kobo routinely updates the firmware and the '
'database version. With this option calibre will attempt '
'to perform full read-write functionality - Here be Dragons!! '
@@ -533,7 +533,7 @@ class AdvancedGroupBox(DeviceOptionsGroupBox):
)
self.debugging_title_checkbox = create_checkbox(
- _("Title to test when debugging"),
+ _('Title to test when debugging'),
_('Part of title of a book that can be used when doing some tests for debugging. '
'The test is to see if the string is contained in the title of a book. '
'The better the match, the less extraneous output.'),
@@ -564,10 +564,10 @@ class MetadataGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super().__init__(parent, device)
- self.setTitle(_("Update metadata on the device"))
+ self.setTitle(_('Update metadata on the device'))
self.options_layout = QGridLayout()
- self.options_layout.setObjectName("options_layout")
+ self.options_layout.setObjectName('options_layout')
self.setLayout(self.options_layout)
self.setCheckable(True)
@@ -576,7 +576,7 @@ class MetadataGroupBox(DeviceOptionsGroupBox):
'Be careful when doing this as it will take time and could make the initial connection take a long time.')))
self.update_series_checkbox = create_checkbox(
- _("Set series information"),
+ _('Set series information'),
_('The book lists on the Kobo devices can display series information. '
'This is not read by the device from the sideloaded books. '
'Series information can only be added to the device after the '
@@ -585,7 +585,7 @@ class MetadataGroupBox(DeviceOptionsGroupBox):
device.get_pref('update_series')
)
self.force_series_id_checkbox = create_checkbox(
- _("Force series ID"),
+ _('Force series ID'),
_('Kobo devices use a SeriesID to distinguish between different series. '
'Purchased books have a SeriesID assigned by Kobo. Sideloaded books '
'have a SeriesID assigned by calibre, which is usually different. '
@@ -595,7 +595,7 @@ class MetadataGroupBox(DeviceOptionsGroupBox):
device.get_pref('force_series_id')
)
self.update_core_metadata_checkbox = create_checkbox(
- _("Update metadata on Book Details pages"),
+ _('Update metadata on Book Details pages'),
_('This will update the metadata in the device database when the device is connected. '
'The metadata updated is displayed on the device in the library and the Book details page. '
'This is the title, authors, comments/synopsis, series name and number, publisher and published Date, ISBN and language. '
@@ -605,57 +605,57 @@ class MetadataGroupBox(DeviceOptionsGroupBox):
)
self.update_purchased_kepubs_checkbox = create_checkbox(
- _("Update purchased books"),
+ _('Update purchased books'),
_('Update books purchased from Kobo and downloaded to the device.'
),
device.get_pref('update_purchased_kepubs')
)
self.update_subtitle_checkbox = create_checkbox(
- _("Subtitle"),
+ _('Subtitle'),
_('Update the subtitle on the device using a template.'),
device.get_pref('update_subtitle')
)
self.subtitle_template_edit = TemplateConfig(
device.get_pref('subtitle_template'),
- tooltip=_("Enter a template to use to set the subtitle. "
- "If the template is empty, the subtitle will be cleared."
+ tooltip=_('Enter a template to use to set the subtitle. '
+ 'If the template is empty, the subtitle will be cleared.'
)
)
self.update_bookstats_checkbox = create_checkbox(
- _("Book stats"),
+ _('Book stats'),
_('Update the book stats '),
device.get_pref('update_bookstats')
)
self.bookstats_wordcount_template_edit = TemplateConfig(
device.get_pref('bookstats_wordcount_template'),
- label=_("Words:"),
- tooltip=_("Enter a template to use to set the word count for the book. "
- "If the template is empty, the word count will be cleared."
+ label=_('Words:'),
+ tooltip=_('Enter a template to use to set the word count for the book. '
+ 'If the template is empty, the word count will be cleared.'
)
)
self.bookstats_pagecount_template_edit = TemplateConfig(
device.get_pref('bookstats_pagecount_template'),
- label=_("Pages:"),
- tooltip=_("Enter a template to use to set the page count for the book. "
- "If the template is empty, the page count will be cleared."
+ label=_('Pages:'),
+ tooltip=_('Enter a template to use to set the page count for the book. '
+ 'If the template is empty, the page count will be cleared.'
)
)
self.bookstats_timetoread_label = QLabel(_('Hours to read estimates:'))
self.bookstats_timetoread_upper_template_edit = TemplateConfig(
device.get_pref('bookstats_timetoread_upper_template'),
- label=_("Upper:"),
- tooltip=_("Enter a template to use to set the upper estimate of the time to read for the book. "
- "The estimate is in hours. "
- "If the template is empty, the time will be cleared."
+ label=_('Upper:'),
+ tooltip=_('Enter a template to use to set the upper estimate of the time to read for the book. '
+ 'The estimate is in hours. '
+ 'If the template is empty, the time will be cleared.'
)
)
self.bookstats_timetoread_lower_template_edit = TemplateConfig(
device.get_pref('bookstats_timetoread_lower_template'),
- label=_("Lower:"),
- tooltip=_("Enter a template to use to set the lower estimate of the time to read for the book. "
- "The estimate is in hours. "
- "If the template is empty, the time will be cleared."
+ label=_('Lower:'),
+ tooltip=_('Enter a template to use to set the lower estimate of the time to read for the book. '
+ 'The estimate is in hours. '
+ 'If the template is empty, the time will be cleared.'
)
)
@@ -842,7 +842,7 @@ if __name__ == '__main__':
s = DeviceScanner()
s.scan()
app = Application([])
- debug_print("KOBOTOUCH:", KOBOTOUCH)
+ debug_print('KOBOTOUCH:', KOBOTOUCH)
dev = KOBOTOUCH(None)
# dev.startup()
# cd = dev.detect_managed_devices(s.devices)
diff --git a/src/calibre/devices/mtp/unix/driver.py b/src/calibre/devices/mtp/unix/driver.py
index d3d8047e51..33ef45461c 100644
--- a/src/calibre/devices/mtp/unix/driver.py
+++ b/src/calibre/devices/mtp/unix/driver.py
@@ -234,7 +234,7 @@ class MTP_DEVICE(MTPDeviceBase):
try:
storage = sorted_storage(self.dev.storage_info)
except self.libmtp.MTPError as e:
- if "The device has no storage information." in str(e):
+ if 'The device has no storage information.' in str(e):
# This happens on newer Android devices while waiting for
# the user to allow access. Apparently what happens is
# that when the user clicks allow, the device disconnects
diff --git a/src/calibre/devices/mtp/windows/driver.py b/src/calibre/devices/mtp/windows/driver.py
index 6f8efb3425..5203a30495 100644
--- a/src/calibre/devices/mtp/windows/driver.py
+++ b/src/calibre/devices/mtp/windows/driver.py
@@ -169,7 +169,7 @@ class MTP_DEVICE(MTPDeviceBase):
try:
pnp_ids = frozenset(self.wpd.enumerate_devices())
except:
- p("Failed to get list of PNP ids on system")
+ p('Failed to get list of PNP ids on system')
p(traceback.format_exc())
return False
diff --git a/src/calibre/devices/paladin/driver.py b/src/calibre/devices/paladin/driver.py
index 54d60f00ac..750aef630e 100644
--- a/src/calibre/devices/paladin/driver.py
+++ b/src/calibre/devices/paladin/driver.py
@@ -82,7 +82,7 @@ class PALADIN(USBMS):
bl = USBMS.books(self, oncard=oncard, end_session=end_session)
dbpath = self.normalize_path(prefix + DBPATH)
- debug_print("SQLite DB Path: " + dbpath)
+ debug_print('SQLite DB Path: ' + dbpath)
with closing(apsw.Connection(dbpath)) as connection:
cursor = connection.cursor()
@@ -122,10 +122,10 @@ class PALADIN(USBMS):
try:
device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
- debug_print("Device Offset: %d ms"%device_offset)
+ debug_print('Device Offset: %d ms'%device_offset)
self.device_offset = device_offset
except ValueError:
- debug_print("No Books To Detect Device Offset.")
+ debug_print('No Books To Detect Device Offset.')
for idx, book in enumerate(bl):
query = 'SELECT _id, thumbnail FROM books WHERE filename = ?'
@@ -174,7 +174,7 @@ class PALADIN(USBMS):
if self.plugboard_func:
plugboard = self.plugboard_func(self.__class__.__name__,
'device_db', self.plugboards)
- debug_print("PALADIN: Using Plugboard", plugboard)
+ debug_print('PALADIN: Using Plugboard', plugboard)
prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
if prefix is None:
@@ -183,7 +183,7 @@ class PALADIN(USBMS):
source_id = 1 if oncard == 'carda' else 0
dbpath = self.normalize_path(prefix + DBPATH)
- debug_print("SQLite DB Path: " + dbpath)
+ debug_print('SQLite DB Path: ' + dbpath)
collections = booklist.get_collections(collections_attributes)
@@ -199,7 +199,7 @@ class PALADIN(USBMS):
try:
cursor = connection.cursor()
- debug_print("Removing Orphaned Collection Records")
+ debug_print('Removing Orphaned Collection Records')
# Purge any collections references that point into the abyss
query = 'DELETE FROM booktags WHERE book_id NOT IN (SELECT _id FROM books)'
@@ -207,7 +207,7 @@ class PALADIN(USBMS):
query = 'DELETE FROM booktags WHERE tag_id NOT IN (SELECT _id FROM tags)'
cursor.execute(query)
- debug_print("Removing Orphaned Book Records")
+ debug_print('Removing Orphaned Book Records')
cursor.close()
except Exception:
@@ -249,7 +249,7 @@ class PALADIN(USBMS):
sequence_max = sequence_min
sequence_dirty = 0
- debug_print("Book Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
+ debug_print('Book Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
try:
cursor = connection.cursor()
@@ -283,7 +283,7 @@ class PALADIN(USBMS):
# If the database is 'dirty', then we should fix up the Ids and the sequence number
if sequence_dirty == 1:
- debug_print("Book Sequence Dirty for Source Id: %d"%source_id)
+ debug_print('Book Sequence Dirty for Source Id: %d'%source_id)
sequence_max = sequence_max + 1
for book, bookId in db_books.items():
if bookId < sequence_min:
@@ -302,7 +302,7 @@ class PALADIN(USBMS):
cursor.execute(query, t)
self.set_database_sequence_id(connection, 'books', sequence_max)
- debug_print("Book Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))
+ debug_print('Book Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
cursor.close()
return db_books
@@ -386,7 +386,7 @@ class PALADIN(USBMS):
sequence_max = sequence_min
sequence_dirty = 0
- debug_print("Collection Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
+ debug_print('Collection Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
try:
cursor = connection.cursor()
@@ -415,7 +415,7 @@ class PALADIN(USBMS):
# If the database is 'dirty', then we should fix up the Ids and the sequence number
if sequence_dirty == 1:
- debug_print("Collection Sequence Dirty for Source Id: %d"%source_id)
+ debug_print('Collection Sequence Dirty for Source Id: %d'%source_id)
sequence_max = sequence_max + 1
for collection, collectionId in db_collections.items():
if collectionId < sequence_min:
@@ -434,13 +434,13 @@ class PALADIN(USBMS):
cursor.execute(query, t)
self.set_database_sequence_id(connection, 'tags', sequence_max)
- debug_print("Collection Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))
+ debug_print('Collection Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
# Fix up the collections table now...
sequence_dirty = 0
sequence_max = sequence_min
- debug_print("Collections Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
+ debug_print('Collections Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
query = 'SELECT _id FROM booktags'
cursor.execute(query)
@@ -454,7 +454,7 @@ class PALADIN(USBMS):
sequence_max = max(sequence_max, row[0])
if sequence_dirty == 1:
- debug_print("Collections Sequence Dirty for Source Id: %d"%source_id)
+ debug_print('Collections Sequence Dirty for Source Id: %d'%source_id)
sequence_max = sequence_max + 1
for pairId in db_collection_pairs:
if pairId < sequence_min:
@@ -465,7 +465,7 @@ class PALADIN(USBMS):
sequence_max = sequence_max + 1
self.set_database_sequence_id(connection, 'booktags', sequence_max)
- debug_print("Collections Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))
+ debug_print('Collections Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
cursor.close()
return db_collections
diff --git a/src/calibre/devices/prs505/sony_cache.py b/src/calibre/devices/prs505/sony_cache.py
index a676cfad08..864ff6d9e9 100644
--- a/src/calibre/devices/prs505/sony_cache.py
+++ b/src/calibre/devices/prs505/sony_cache.py
@@ -47,11 +47,11 @@ EMPTY_EXT_CACHE = b'''\
'''
MIME_MAP = {
- "lrf" : "application/x-sony-bbeb",
+ 'lrf' : 'application/x-sony-bbeb',
'lrx' : 'application/x-sony-bbeb',
- "rtf" : "application/rtf",
- "pdf" : "application/pdf",
- "txt" : "text/plain" ,
+ 'rtf' : 'application/rtf',
+ 'pdf' : 'application/pdf',
+ 'txt' : 'text/plain' ,
'epub': 'application/epub+zip',
}
@@ -71,9 +71,9 @@ def strptime(src):
def strftime(epoch, zone=time.localtime):
try:
- src = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone(epoch)).split()
+ src = time.strftime('%w, %d %m %Y %H:%M:%S GMT', zone(epoch)).split()
except:
- src = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone()).split()
+ src = time.strftime('%w, %d %m %Y %H:%M:%S GMT', zone()).split()
src[0] = INVERSE_DAY_MAP[int(src[0][:-1])]+','
src[2] = INVERSE_MONTH_MAP[int(src[2])]
@@ -460,7 +460,7 @@ class XMLCache:
if not self.is_sony_periodical(book):
return
record.set('conformsTo',
- "http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0")
+ 'http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0')
record.set('description', '')
@@ -649,10 +649,10 @@ class XMLCache:
debug_print("Use localtime TZ and tz='0' for new book", book.lpath)
elif ltz_count >= gtz_count:
tz = time.localtime
- debug_print("Use localtime TZ for new book", book.lpath)
+ debug_print('Use localtime TZ for new book', book.lpath)
else:
tz = time.gmtime
- debug_print("Use GMT TZ for new book", book.lpath)
+ debug_print('Use GMT TZ for new book', book.lpath)
date = strftime(timestamp, zone=tz)
record.set('date', clean(date))
try:
diff --git a/src/calibre/devices/prst1/driver.py b/src/calibre/devices/prst1/driver.py
index 47158e6bc2..2e678df3dc 100644
--- a/src/calibre/devices/prst1/driver.py
+++ b/src/calibre/devices/prst1/driver.py
@@ -167,7 +167,7 @@ class PRST1(USBMS):
bl = USBMS.books(self, oncard=oncard, end_session=end_session)
dbpath = self.normalize_path(prefix + DBPATH)
- debug_print("SQLite DB Path: " + dbpath)
+ debug_print('SQLite DB Path: ' + dbpath)
with closing(sqlite.connect(dbpath)) as connection:
# Replace undecodable characters in the db instead of erroring out
@@ -210,10 +210,10 @@ class PRST1(USBMS):
try:
device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
- debug_print("Device Offset: %d ms"%device_offset)
+ debug_print('Device Offset: %d ms'%device_offset)
self.device_offset = device_offset
except ValueError:
- debug_print("No Books To Detect Device Offset.")
+ debug_print('No Books To Detect Device Offset.')
for idx, book in enumerate(bl):
query = 'SELECT _id, thumbnail FROM books WHERE file_path = ?'
@@ -263,7 +263,7 @@ class PRST1(USBMS):
if self.plugboard_func:
plugboard = self.plugboard_func(self.__class__.__name__,
'device_db', self.plugboards)
- debug_print("PRST1: Using Plugboard", plugboard)
+ debug_print('PRST1: Using Plugboard', plugboard)
prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
if prefix is None:
@@ -272,7 +272,7 @@ class PRST1(USBMS):
source_id = 1 if oncard == 'carda' else 0
dbpath = self.normalize_path(prefix + DBPATH)
- debug_print("SQLite DB Path: " + dbpath)
+ debug_print('SQLite DB Path: ' + dbpath)
collections = booklist.get_collections(collections_attributes)
@@ -290,7 +290,7 @@ class PRST1(USBMS):
try:
cursor = connection.cursor()
- debug_print("Removing Orphaned Collection Records")
+ debug_print('Removing Orphaned Collection Records')
# Purge any collections references that point into the abyss
query = 'DELETE FROM collections WHERE content_id NOT IN (SELECT _id FROM books)'
@@ -298,7 +298,7 @@ class PRST1(USBMS):
query = 'DELETE FROM collections WHERE collection_id NOT IN (SELECT _id FROM collection)'
cursor.execute(query)
- debug_print("Removing Orphaned Book Records")
+ debug_print('Removing Orphaned Book Records')
# Purge any references to books not in this database
# Idea is to prevent any spill-over where these wind up applying to some other book
@@ -362,7 +362,7 @@ class PRST1(USBMS):
sequence_max = sequence_min
sequence_dirty = 0
- debug_print("Book Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
+ debug_print('Book Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
try:
cursor = connection.cursor()
@@ -396,7 +396,7 @@ class PRST1(USBMS):
# If the database is 'dirty', then we should fix up the Ids and the sequence number
if sequence_dirty == 1:
- debug_print("Book Sequence Dirty for Source Id: %d"%source_id)
+ debug_print('Book Sequence Dirty for Source Id: %d'%source_id)
sequence_max = sequence_max + 1
for book, bookId in db_books.items():
if bookId < sequence_min:
@@ -433,7 +433,7 @@ class PRST1(USBMS):
cursor.execute(query, t)
self.set_database_sequence_id(connection, 'books', sequence_max)
- debug_print("Book Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))
+ debug_print('Book Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
cursor.close()
return db_books
@@ -534,7 +534,7 @@ class PRST1(USBMS):
sequence_max = sequence_min
sequence_dirty = 0
- debug_print("Collection Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
+ debug_print('Collection Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
try:
cursor = connection.cursor()
@@ -563,7 +563,7 @@ class PRST1(USBMS):
# If the database is 'dirty', then we should fix up the Ids and the sequence number
if sequence_dirty == 1:
- debug_print("Collection Sequence Dirty for Source Id: %d"%source_id)
+ debug_print('Collection Sequence Dirty for Source Id: %d'%source_id)
sequence_max = sequence_max + 1
for collection, collectionId in db_collections.items():
if collectionId < sequence_min:
@@ -582,13 +582,13 @@ class PRST1(USBMS):
cursor.execute(query, t)
self.set_database_sequence_id(connection, 'collection', sequence_max)
- debug_print("Collection Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))
+ debug_print('Collection Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
# Fix up the collections table now...
sequence_dirty = 0
sequence_max = sequence_min
- debug_print("Collections Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
+ debug_print('Collections Sequence Min: %d, Source Id: %d'%(sequence_min,source_id))
query = 'SELECT _id FROM collections'
cursor.execute(query)
@@ -602,7 +602,7 @@ class PRST1(USBMS):
sequence_max = max(sequence_max, row[0])
if sequence_dirty == 1:
- debug_print("Collections Sequence Dirty for Source Id: %d"%source_id)
+ debug_print('Collections Sequence Dirty for Source Id: %d'%source_id)
sequence_max = sequence_max + 1
for pairId in db_collection_pairs:
if pairId < sequence_min:
@@ -613,7 +613,7 @@ class PRST1(USBMS):
sequence_max = sequence_max + 1
self.set_database_sequence_id(connection, 'collections', sequence_max)
- debug_print("Collections Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))
+ debug_print('Collections Sequence Max: %d, Source Id: %d'%(sequence_max,source_id))
cursor.close()
return db_collections
@@ -727,7 +727,7 @@ class PRST1(USBMS):
metadata.lpath = filepath.partition(prefix)[2]
metadata.lpath = metadata.lpath.replace('\\', '/')
dbpath = self.normalize_path(prefix + DBPATH)
- debug_print("SQLite DB Path: " + dbpath)
+ debug_print('SQLite DB Path: ' + dbpath)
with closing(sqlite.connect(dbpath)) as connection:
cursor = connection.cursor()
diff --git a/src/calibre/devices/smart_device_app/driver.py b/src/calibre/devices/smart_device_app/driver.py
index 2da096f7bc..136d7020e2 100644
--- a/src/calibre/devices/smart_device_app/driver.py
+++ b/src/calibre/devices/smart_device_app/driver.py
@@ -48,7 +48,7 @@ from polyglot.builtins import as_bytes, iteritems, itervalues
def synchronous(tlockname):
- """A decorator to place an instance based lock around a method """
+ '''A decorator to place an instance based lock around a method '''
def _synched(func):
@wraps(func)
@@ -96,7 +96,7 @@ class ConnectionListener(Thread):
if not self.all_ip_addresses:
self.all_ip_addresses = get_all_ips()
if self.all_ip_addresses:
- self.driver._debug("All IP addresses", self.all_ip_addresses)
+ self.driver._debug('All IP addresses', self.all_ip_addresses)
if not self.driver.connection_queue.empty():
d = currently_connected_device.device
@@ -485,7 +485,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
except:
today = time.localtime()
date = (today[0], today[1], today[2])
- template = "{title}_%d-%d-%d" % date
+ template = '{title}_%d-%d-%d' % date
use_subdirs = self.SUPPORTS_SUB_DIRS and settings.use_subdirs
from calibre.library.save_to_disk import config, get_components
@@ -854,7 +854,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
json_metadata[key]['book'] = self.json_codec.encode_book_metadata(book['book'])
json_metadata[key]['last_used'] = book['last_used']
result = as_bytes(json.dumps(json_metadata, indent=2, default=to_json))
- fd.write(("%0.7d\n"%(len(result)+1)).encode('ascii'))
+ fd.write(('%0.7d\n'%(len(result)+1)).encode('ascii'))
fd.write(result)
fd.write(b'\n')
count += 1
@@ -1014,14 +1014,14 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
from functools import partial
p = partial(prints, file=output)
if self.is_connected:
- p("A wireless device is connected")
+ p('A wireless device is connected')
return True
all_ip_addresses = get_all_ips()
if all_ip_addresses:
- p("All IP addresses", all_ip_addresses)
+ p('All IP addresses', all_ip_addresses)
else:
- p("No IP addresses found")
- p("No device is connected")
+ p('No IP addresses found')
+ p('No device is connected')
return False
@synchronous('sync_lock')
@@ -1123,7 +1123,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
self.client_device_name = result.get('deviceName', self.client_device_kind)
self._debug('Client device name', self.client_device_name)
- self.client_app_name = result.get('appName', "")
+ self.client_app_name = result.get('appName', '')
self._debug('Client app name', self.client_app_name)
self.app_version_number = result.get('ccVersionNumber', '0')
self._debug('App version #:', self.app_version_number)
@@ -1132,7 +1132,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
if (self.client_app_name == 'CalibreCompanion' and
self.app_version_number < self.CURRENT_CC_VERSION):
self._debug('Telling client to update')
- self._call_client("DISPLAY_MESSAGE",
+ self._call_client('DISPLAY_MESSAGE',
{'messageKind': self.MESSAGE_UPDATE_NEEDED,
'lastestKnownAppVersion': self.CURRENT_CC_VERSION})
except:
@@ -1185,7 +1185,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
# bad password
self._debug('password mismatch')
try:
- self._call_client("DISPLAY_MESSAGE",
+ self._call_client('DISPLAY_MESSAGE',
{'messageKind': self.MESSAGE_PASSWORD_ERROR,
'currentLibraryName': self.current_library_name,
'currentLibraryUUID': library_uuid})
@@ -1242,7 +1242,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
@synchronous('sync_lock')
def set_driveinfo_name(self, location_code, name):
- self._update_driveinfo_record(self.driveinfo, "main", name)
+ self._update_driveinfo_record(self.driveinfo, 'main', name)
self._call_client('SET_CALIBRE_DEVICE_NAME',
{'location_code': 'main', 'name':name})
@@ -1625,7 +1625,7 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
device_prefs.set_overrides(manage_device_metadata='on_connect')
def _show_message(self, message):
- self._call_client("DISPLAY_MESSAGE",
+ self._call_client('DISPLAY_MESSAGE',
{'messageKind': self.MESSAGE_SHOW_TOAST,
'message': message})
@@ -1685,8 +1685,8 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
self.have_bad_sync_columns = True
elif fm[self.is_read_sync_col]['datatype'] != 'bool':
self._debug('is_read_sync_col not bool type')
- self._show_message(_("The read sync column %s is "
- "not a Yes/No column")%self.is_read_sync_col)
+ self._show_message(_('The read sync column %s is '
+ 'not a Yes/No column')%self.is_read_sync_col)
self.have_bad_sync_columns = True
if self.is_read_date_sync_col:
@@ -1697,8 +1697,8 @@ class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
self.have_bad_sync_columns = True
elif fm[self.is_read_date_sync_col]['datatype'] != 'datetime':
self._debug('is_read_date_sync_col not date type')
- self._show_message(_("The read date sync column %s is "
- "not a date column")%self.is_read_date_sync_col)
+ self._show_message(_('The read date sync column %s is '
+ 'not a date column')%self.is_read_date_sync_col)
self.have_bad_sync_columns = True
self.have_checked_sync_columns = True
diff --git a/src/calibre/devices/usbms/device.py b/src/calibre/devices/usbms/device.py
index 1f51d8fb86..421098b535 100644
--- a/src/calibre/devices/usbms/device.py
+++ b/src/calibre/devices/usbms/device.py
@@ -673,7 +673,7 @@ class Device(DeviceConfig, DevicePlugin):
hal = get_hal()
vols = hal.get_volumes(d)
if verbose:
- print("FBSD:\t", vols)
+ print('FBSD:\t', vols)
ok, mv = hal.mount_volumes(vols)
if not ok:
diff --git a/src/calibre/devices/usbms/driver.py b/src/calibre/devices/usbms/driver.py
index a0199b2c52..20ff03b326 100644
--- a/src/calibre/devices/usbms/driver.py
+++ b/src/calibre/devices/usbms/driver.py
@@ -235,7 +235,7 @@ class USBMS(CLI, Device):
def update_booklist(filename, path, prefix):
changed = False
# Ignore AppleDouble files
- if filename.startswith("._"):
+ if filename.startswith('._'):
return False
if path_to_ext(filename) in all_formats and self.is_allowed_book_file(filename, path, prefix):
try:
diff --git a/src/calibre/devices/usbms/hal.py b/src/calibre/devices/usbms/hal.py
index 89880eadc7..f6ca5e87f6 100644
--- a/src/calibre/devices/usbms/hal.py
+++ b/src/calibre/devices/usbms/hal.py
@@ -87,7 +87,7 @@ class HAL:
time.sleep(1)
loops += 1
if loops > 100:
- raise Exception("ERROR: Timeout waiting for mount to complete")
+ raise Exception('ERROR: Timeout waiting for mount to complete')
return self.prop(vol['dev'], 'volume.mount_point')
def mount_volumes(self, volumes):
@@ -106,19 +106,19 @@ class HAL:
# Mount Point becomes Mount Path
mp += '/'
if DEBUG:
- print("FBSD:\tmounted", vol['label'], "on", mp)
+ print('FBSD:\tmounted', vol['label'], 'on', mp)
if mtd == 0:
ans['_main_prefix'], ans['_main_vol'] = mp, vol['vol']
if DEBUG:
- print("FBSD:\tmain = ", mp)
+ print('FBSD:\tmain = ', mp)
elif mtd == 1:
ans['_card_a_prefix'], ans['_card_a_vol'] = mp, vol['vol']
if DEBUG:
- print("FBSD:\tcard a = ", mp)
+ print('FBSD:\tcard a = ', mp)
elif mtd == 2:
ans['_card_b_prefix'], ans['_card_b_vol'] = mp, vol['vol']
if DEBUG:
- print("FBSD:\tcard b = ", mp)
+ print('FBSD:\tcard b = ', mp)
break
mtd += 1
diff --git a/src/calibre/devices/user_defined/driver.py b/src/calibre/devices/user_defined/driver.py
index 8b4a75dd4f..c1f421b561 100644
--- a/src/calibre/devices/user_defined/driver.py
+++ b/src/calibre/devices/user_defined/driver.py
@@ -58,7 +58,7 @@ class USER_DEFINED(USBMS):
'Enter the folder where the books are to be stored. This folder '
'is prepended to any send_to_device template') + '',
_('Swap main and card A') + ':::
' + _(
- 'Check this box if the device\'s main memory is being seen as '
+ "Check this box if the device's main memory is being seen as "
'card a and the card is being seen as main memory') + '
',
]
EXTRA_CUSTOMIZATION_DEFAULT = [
diff --git a/src/calibre/devices/utils.py b/src/calibre/devices/utils.py
index 071c3be9b6..763d967115 100644
--- a/src/calibre/devices/utils.py
+++ b/src/calibre/devices/utils.py
@@ -34,11 +34,11 @@ def sanity_check(on_card, files, card_prefixes, free_space):
size += os.path.getsize(getattr(f, 'name', f))
if not on_card and size > free_space[0] - 2*1024*1024:
- raise FreeSpaceError(_("There is insufficient free space in main memory"))
+ raise FreeSpaceError(_('There is insufficient free space in main memory'))
if on_card == 'carda' and size > free_space[1] - 1024*1024:
- raise FreeSpaceError(_("There is insufficient free space on the storage card"))
+ raise FreeSpaceError(_('There is insufficient free space on the storage card'))
if on_card == 'cardb' and size > free_space[2] - 1024*1024:
- raise FreeSpaceError(_("There is insufficient free space on the storage card"))
+ raise FreeSpaceError(_('There is insufficient free space on the storage card'))
def build_template_regexp(template):
@@ -91,7 +91,7 @@ def create_upload_path(mdata, fname, template, sanitize,
except:
today = time.localtime()
date = (today[0], today[1], today[2])
- template = "{title}_%d-%d-%d" % date
+ template = '{title}_%d-%d-%d' % date
fname = sanitize(fname)
ext = path_type.splitext(fname)[1]
diff --git a/src/calibre/devices/winusb.py b/src/calibre/devices/winusb.py
index a7839e795a..406548878c 100644
--- a/src/calibre/devices/winusb.py
+++ b/src/calibre/devices/winusb.py
@@ -47,10 +47,10 @@ except ImportError:
class GUID(Structure):
_fields_ = [
- ("data1", DWORD),
- ("data2", WORD),
- ("data3", WORD),
- ("data4", c_ubyte * 8)]
+ ('data1', DWORD),
+ ('data2', WORD),
+ ('data3', WORD),
+ ('data4', c_ubyte * 8)]
def __init__(self, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8):
self.data1 = l
@@ -66,12 +66,12 @@ class GUID(Structure):
self.data4[7] = b8
def __str__(self):
- return "{{{:08x}-{:04x}-{:04x}-{}-{}}}".format(
+ return '{{{:08x}-{:04x}-{:04x}-{}-{}}}'.format(
self.data1,
self.data2,
self.data3,
- ''.join(["%02x" % d for d in self.data4[:2]]),
- ''.join(["%02x" % d for d in self.data4[2:]]),
+ ''.join(['%02x' % d for d in self.data4[:2]]),
+ ''.join(['%02x' % d for d in self.data4[2:]]),
)
@@ -130,7 +130,7 @@ class SP_DEVINFO_DATA(Structure):
]
def __str__(self):
- return f"ClassGuid:{self.ClassGuid} DevInst:{self.DevInst}"
+ return f'ClassGuid:{self.ClassGuid} DevInst:{self.DevInst}'
PSP_DEVINFO_DATA = POINTER(SP_DEVINFO_DATA)
@@ -145,7 +145,7 @@ class SP_DEVICE_INTERFACE_DATA(Structure):
]
def __str__(self):
- return f"InterfaceClassGuid:{self.InterfaceClassGuid} Flags:{self.Flags}"
+ return f'InterfaceClassGuid:{self.InterfaceClassGuid} Flags:{self.Flags}'
ANYSIZE_ARRAY = 1
@@ -153,8 +153,8 @@ ANYSIZE_ARRAY = 1
class SP_DEVICE_INTERFACE_DETAIL_DATA(Structure):
_fields_ = [
- ("cbSize", DWORD),
- ("DevicePath", c_wchar*ANYSIZE_ARRAY)
+ ('cbSize', DWORD),
+ ('DevicePath', c_wchar*ANYSIZE_ARRAY)
]
diff --git a/src/calibre/ebooks/chardet.py b/src/calibre/ebooks/chardet.py
index 402d28bd3c..bb2a83ee9d 100644
--- a/src/calibre/ebooks/chardet.py
+++ b/src/calibre/ebooks/chardet.py
@@ -100,7 +100,7 @@ def find_declared_encoding(raw, limit=50*1024):
return ans
-_CHARSET_ALIASES = {"macintosh" : "mac-roman", "x-sjis" : "shift-jis", 'mac-centraleurope': 'cp1250'}
+_CHARSET_ALIASES = {'macintosh' : 'mac-roman', 'x-sjis' : 'shift-jis', 'mac-centraleurope': 'cp1250'}
def detect(bytestring):
diff --git a/src/calibre/ebooks/chm/metadata.py b/src/calibre/ebooks/chm/metadata.py
index 18736d2ecc..ced2008fb1 100644
--- a/src/calibre/ebooks/chm/metadata.py
+++ b/src/calibre/ebooks/chm/metadata.py
@@ -22,11 +22,11 @@ def _clean(s):
def _detag(tag):
- ans = ""
+ ans = ''
if tag is None:
return ans
for elem in tag:
- if hasattr(elem, "contents"):
+ if hasattr(elem, 'contents'):
ans += _detag(elem)
else:
ans += _clean(elem)
@@ -119,7 +119,7 @@ def _get_cover(soup, rdr):
try:
ans = rdr.GetFile(ans)
except:
- ans = rdr.root + "/" + ans
+ ans = rdr.root + '/' + ans
try:
ans = rdr.GetFile(ans)
except:
diff --git a/src/calibre/ebooks/chm/reader.py b/src/calibre/ebooks/chm/reader.py
index 9bdd1a1c99..cf3c02b0e8 100644
--- a/src/calibre/ebooks/chm/reader.py
+++ b/src/calibre/ebooks/chm/reader.py
@@ -73,7 +73,7 @@ class CHMReader(CHMFile):
# location of '.hhc' file, which is the CHM TOC.
base = self.topics or self.home
self.root = os.path.splitext(base.lstrip('/'))[0]
- self.hhc_path = self.root + ".hhc"
+ self.hhc_path = self.root + '.hhc'
def relpath_to_first_html_file(self):
# See https://www.nongnu.org/chmspec/latest/Internal.html#SYSTEM
@@ -170,10 +170,10 @@ class CHMReader(CHMFile):
path = '/' + path
res, ui = self.ResolveObject(path)
if res != chmlib.CHM_RESOLVE_SUCCESS:
- raise CHMError(f"Unable to locate {path!r} within CHM file {self.filename!r}")
+ raise CHMError(f'Unable to locate {path!r} within CHM file {self.filename!r}')
size, data = self.RetrieveObject(ui)
if size == 0:
- raise CHMError(f"{path!r} is zero bytes in length!")
+ raise CHMError(f'{path!r} is zero bytes in length!')
return data
def get_home(self):
@@ -254,7 +254,7 @@ class CHMReader(CHMFile):
soup = BeautifulSoup(data)
except ValueError:
# hit some strange encoding problems...
- self.log.exception("Unable to parse html for cleaning, leaving it")
+ self.log.exception('Unable to parse html for cleaning, leaving it')
return data
# nuke javascript...
[s.extract() for s in soup('script')]
diff --git a/src/calibre/ebooks/conversion/cli.py b/src/calibre/ebooks/conversion/cli.py
index 0b43300c2e..beb3496656 100644
--- a/src/calibre/ebooks/conversion/cli.py
+++ b/src/calibre/ebooks/conversion/cli.py
@@ -151,10 +151,10 @@ def recipe_test(option, opt_str, value, parser):
for arg in parser.rargs:
# stop on --foo like options
- if arg[:2] == "--":
+ if arg[:2] == '--':
break
# stop on -a, but not on -3 or -3.0
- if arg[:1] == "-" and len(arg) > 1 and not floatable(arg):
+ if arg[:1] == '-' and len(arg) > 1 and not floatable(arg):
break
try:
value.append(int(arg))
diff --git a/src/calibre/ebooks/conversion/plugins/chm_input.py b/src/calibre/ebooks/conversion/plugins/chm_input.py
index 4fc30bf8de..267e1725a4 100644
--- a/src/calibre/ebooks/conversion/plugins/chm_input.py
+++ b/src/calibre/ebooks/conversion/plugins/chm_input.py
@@ -132,7 +132,7 @@ class CHMInput(InputFormatPlugin):
# print(etree.tostring(hhcroot, pretty_print=True))
# print("=============================")
log.debug('Found %d section nodes' % toc.count())
- htmlpath = os.path.splitext(hhcpath)[0] + ".html"
+ htmlpath = os.path.splitext(hhcpath)[0] + '.html'
base = os.path.dirname(os.path.abspath(htmlpath))
def unquote(x):
diff --git a/src/calibre/ebooks/conversion/plugins/comic_input.py b/src/calibre/ebooks/conversion/plugins/comic_input.py
index 08bfd6c437..aaa6df1cbf 100644
--- a/src/calibre/ebooks/conversion/plugins/comic_input.py
+++ b/src/calibre/ebooks/conversion/plugins/comic_input.py
@@ -44,8 +44,8 @@ class ComicInput(InputFormatPlugin):
OptionRecommendation(name='landscape', recommended_value=False,
help=_("Don't split landscape images into two portrait images")),
OptionRecommendation(name='wide', recommended_value=False,
- help=_("Keep aspect ratio and scale image using screen height as "
- "image width for viewing in landscape mode.")),
+ help=_('Keep aspect ratio and scale image using screen height as '
+ 'image width for viewing in landscape mode.')),
OptionRecommendation(name='right2left', recommended_value=False,
help=_('Used for right-to-left publications like manga. '
'Causes landscape pages to be split into portrait pages '
@@ -62,7 +62,7 @@ class ComicInput(InputFormatPlugin):
'are converted to. You can experiment to see which format gives '
'you optimal size and look on your device.')),
OptionRecommendation(name='no_process', recommended_value=False,
- help=_("Apply no processing to the image")),
+ help=_('Apply no processing to the image')),
OptionRecommendation(name='dont_grayscale', recommended_value=False,
help=_('Do not convert the image to grayscale (black and white)')),
OptionRecommendation(name='comic_image_size', recommended_value=None,
diff --git a/src/calibre/ebooks/conversion/plugins/djvu_input.py b/src/calibre/ebooks/conversion/plugins/djvu_input.py
index dd38176da4..34587376ed 100644
--- a/src/calibre/ebooks/conversion/plugins/djvu_input.py
+++ b/src/calibre/ebooks/conversion/plugins/djvu_input.py
@@ -28,7 +28,7 @@ class DJVUInput(InputFormatPlugin):
raise ValueError('The DJVU file contains no text, only images, probably page scans.'
' calibre only supports conversion of DJVU files with actual text in them.')
- html = convert_basic(raw_text.replace(b"\n", b' ').replace(
+ html = convert_basic(raw_text.replace(b'\n', b' ').replace(
b'\037', b'\n\n'))
# Run the HTMLized text through the html processing plugin.
from calibre.customize.ui import plugin_for_input_format
diff --git a/src/calibre/ebooks/conversion/plugins/epub_input.py b/src/calibre/ebooks/conversion/plugins/epub_input.py
index f919e64e34..a2404dfd8c 100644
--- a/src/calibre/ebooks/conversion/plugins/epub_input.py
+++ b/src/calibre/ebooks/conversion/plugins/epub_input.py
@@ -244,7 +244,7 @@ class EPUBInput(InputFormatPlugin):
with open('META-INF/container.xml', 'rb') as f:
root = safe_xml_fromstring(f.read())
for r in root.xpath('//*[local-name()="rootfile"]'):
- if attr(r, 'media-type') != "application/oebps-package+xml":
+ if attr(r, 'media-type') != 'application/oebps-package+xml':
continue
path = attr(r, 'full-path')
if not path:
diff --git a/src/calibre/ebooks/conversion/plugins/epub_output.py b/src/calibre/ebooks/conversion/plugins/epub_output.py
index 39ce9c7b4c..4662569008 100644
--- a/src/calibre/ebooks/conversion/plugins/epub_output.py
+++ b/src/calibre/ebooks/conversion/plugins/epub_output.py
@@ -77,7 +77,7 @@ class EPUBOutput(OutputFormatPlugin):
),
OptionRecommendation(name='no_default_epub_cover', recommended_value=False,
- help=_('Normally, if the input file has no cover and you don\'t'
+ help=_("Normally, if the input file has no cover and you don't"
' specify one, a default cover is generated with the title, '
'authors, etc. This option disables the generation of this cover.')
),
diff --git a/src/calibre/ebooks/conversion/plugins/fb2_input.py b/src/calibre/ebooks/conversion/plugins/fb2_input.py
index e192c58071..c14d681735 100644
--- a/src/calibre/ebooks/conversion/plugins/fb2_input.py
+++ b/src/calibre/ebooks/conversion/plugins/fb2_input.py
@@ -1,8 +1,8 @@
__license__ = 'GPL v3'
 __copyright__ = '2008, Anatoly Shipitsin'
-"""
+'''
Convert .fb2 files to .lrf
-"""
+'''
import os
import re
@@ -91,7 +91,7 @@ class FB2Input(InputFormatPlugin):
log.debug('Converting XML to HTML...')
with open(P('templates/fb2.xsl'), 'rb') as f:
ss = f.read().decode('utf-8')
- ss = ss.replace("__FB_NS__", fb_ns)
+ ss = ss.replace('__FB_NS__', fb_ns)
if options.no_inline_fb2_toc:
log('Disabling generation of inline FB2 TOC')
ss = re.compile(r'.*',
diff --git a/src/calibre/ebooks/conversion/plugins/html_output.py b/src/calibre/ebooks/conversion/plugins/html_output.py
index 9a0a61ee85..ea64c70cb3 100644
--- a/src/calibre/ebooks/conversion/plugins/html_output.py
+++ b/src/calibre/ebooks/conversion/plugins/html_output.py
@@ -213,7 +213,7 @@ class HTMLOutput(OutputFormatPlugin):
f.write(t.encode('utf-8'))
item.unload_data_from_memory(memory=path)
- zfile = zipfile.ZipFile(output_path, "w")
+ zfile = zipfile.ZipFile(output_path, 'w')
zfile.add_dir(output_dir, basename(output_dir))
zfile.write(output_file, basename(output_file), zipfile.ZIP_DEFLATED)
diff --git a/src/calibre/ebooks/conversion/plugins/htmlz_output.py b/src/calibre/ebooks/conversion/plugins/htmlz_output.py
index 3c87c57492..98b5c6057b 100644
--- a/src/calibre/ebooks/conversion/plugins/htmlz_output.py
+++ b/src/calibre/ebooks/conversion/plugins/htmlz_output.py
@@ -39,7 +39,7 @@ class HTMLZOutput(OutputFormatPlugin):
OptionRecommendation(name='htmlz_class_style', recommended_value='external',
level=OptionRecommendation.LOW,
choices=list(ui_data['sheet_choices']),
- help=_('How to handle the CSS when using css-type = \'class\'.\n'
+ help=_("How to handle the CSS when using css-type = 'class'.\n"
'Default is external.\n'
'external: {external}\n'
'inline: {inline}'
diff --git a/src/calibre/ebooks/conversion/plugins/lrf_output.py b/src/calibre/ebooks/conversion/plugins/lrf_output.py
index b3d11d52a3..dfcc15dcb4 100644
--- a/src/calibre/ebooks/conversion/plugins/lrf_output.py
+++ b/src/calibre/ebooks/conversion/plugins/lrf_output.py
@@ -102,7 +102,7 @@ class LRFOutput(OutputFormatPlugin):
OptionRecommendation(name='header', recommended_value=False,
help=_('Add a header to all the pages with title and author.')
),
- OptionRecommendation(name='header_format', recommended_value="%t by %a",
+ OptionRecommendation(name='header_format', recommended_value='%t by %a',
help=_('Set the format of the header. %a is replaced by the author '
'and %t by the title. Default is %default')
),
diff --git a/src/calibre/ebooks/conversion/plugins/mobi_output.py b/src/calibre/ebooks/conversion/plugins/mobi_output.py
index 15278d2838..cfe03aa79e 100644
--- a/src/calibre/ebooks/conversion/plugins/mobi_output.py
+++ b/src/calibre/ebooks/conversion/plugins/mobi_output.py
@@ -49,7 +49,7 @@ class MOBIOutput(OutputFormatPlugin):
),
OptionRecommendation(name='no_inline_toc',
recommended_value=False, level=OptionRecommendation.LOW,
- help=_('Don\'t add Table of Contents to the book. Useful if '
+ help=_("Don't add Table of Contents to the book. Useful if "
'the book has its own table of contents.')),
OptionRecommendation(name='toc_title', recommended_value=None,
help=_('Title for any generated inline table of contents.')
@@ -280,7 +280,7 @@ class AZW3Output(OutputFormatPlugin):
),
OptionRecommendation(name='no_inline_toc',
recommended_value=False, level=OptionRecommendation.LOW,
- help=_('Don\'t add Table of Contents to the book. Useful if '
+ help=_("Don't add Table of Contents to the book. Useful if "
'the book has its own table of contents.')),
OptionRecommendation(name='toc_title', recommended_value=None,
help=_('Title for any generated inline table of contents.')
diff --git a/src/calibre/ebooks/conversion/plugins/snb_input.py b/src/calibre/ebooks/conversion/plugins/snb_input.py
index 7fbb2f773a..19da8ae2f9 100644
--- a/src/calibre/ebooks/conversion/plugins/snb_input.py
+++ b/src/calibre/ebooks/conversion/plugins/snb_input.py
@@ -33,16 +33,16 @@ class SNBInput(InputFormatPlugin):
from calibre.ebooks.snb.snbfile import SNBFile
from calibre.utils.xml_parse import safe_xml_fromstring
- log.debug("Parsing SNB file...")
+ log.debug('Parsing SNB file...')
snbFile = SNBFile()
try:
snbFile.Parse(stream)
except:
- raise ValueError("Invalid SNB file")
+ raise ValueError('Invalid SNB file')
if not snbFile.IsValid():
- log.debug("Invalid SNB file")
- raise ValueError("Invalid SNB file")
- log.debug("Handle meta data ...")
+ log.debug('Invalid SNB file')
+ raise ValueError('Invalid SNB file')
+ log.debug('Handle meta data ...')
from calibre.ebooks.conversion.plumber import create_oebbook
oeb = create_oebbook(log, None, options,
encoding=options.input_encoding, populate=False)
diff --git a/src/calibre/ebooks/conversion/plugins/snb_output.py b/src/calibre/ebooks/conversion/plugins/snb_output.py
index 3a74b634f9..cfffbde523 100644
--- a/src/calibre/ebooks/conversion/plugins/snb_output.py
+++ b/src/calibre/ebooks/conversion/plugins/snb_output.py
@@ -97,27 +97,27 @@ class SNBOutput(OutputFormatPlugin):
href = g['cover'].href
# Output book info file
- bookInfoTree = etree.Element("book-snbf", version="1.0")
- headTree = etree.SubElement(bookInfoTree, "head")
- etree.SubElement(headTree, "name").text = title
- etree.SubElement(headTree, "author").text = ' '.join(authors)
- etree.SubElement(headTree, "language").text = lang
- etree.SubElement(headTree, "rights")
- etree.SubElement(headTree, "publisher").text = publishers
- etree.SubElement(headTree, "generator").text = __appname__ + ' ' + __version__
- etree.SubElement(headTree, "created")
- etree.SubElement(headTree, "abstract").text = abstract
+ bookInfoTree = etree.Element('book-snbf', version='1.0')
+ headTree = etree.SubElement(bookInfoTree, 'head')
+ etree.SubElement(headTree, 'name').text = title
+ etree.SubElement(headTree, 'author').text = ' '.join(authors)
+ etree.SubElement(headTree, 'language').text = lang
+ etree.SubElement(headTree, 'rights')
+ etree.SubElement(headTree, 'publisher').text = publishers
+ etree.SubElement(headTree, 'generator').text = __appname__ + ' ' + __version__
+ etree.SubElement(headTree, 'created')
+ etree.SubElement(headTree, 'abstract').text = abstract
if href is not None:
- etree.SubElement(headTree, "cover").text = ProcessFileName(href)
+ etree.SubElement(headTree, 'cover').text = ProcessFileName(href)
else:
- etree.SubElement(headTree, "cover")
+ etree.SubElement(headTree, 'cover')
with open(os.path.join(snbfDir, 'book.snbf'), 'wb') as f:
f.write(etree.tostring(bookInfoTree, pretty_print=True, encoding='utf-8'))
# Output TOC
- tocInfoTree = etree.Element("toc-snbf")
- tocHead = etree.SubElement(tocInfoTree, "head")
- tocBody = etree.SubElement(tocInfoTree, "body")
+ tocInfoTree = etree.Element('toc-snbf')
+ tocHead = etree.SubElement(tocInfoTree, 'head')
+ tocBody = etree.SubElement(tocInfoTree, 'body')
outputFiles = {}
if oeb_book.toc.count() == 0:
log.warn('This SNB file has no Table of Contents. '
@@ -131,11 +131,11 @@ class SNBOutput(OutputFormatPlugin):
# "Cover Pages".
# oeb_book.toc does not support "insert", so we generate
# the tocInfoTree directly instead of modifying the toc
- ch = etree.SubElement(tocBody, "chapter")
- ch.set("src", ProcessFileName(first.href) + ".snbc")
+ ch = etree.SubElement(tocBody, 'chapter')
+ ch.set('src', ProcessFileName(first.href) + '.snbc')
ch.text = _('Cover pages')
outputFiles[first.href] = []
- outputFiles[first.href].append(("", _("Cover pages")))
+ outputFiles[first.href].append(('', _('Cover pages')))
for tocitem in oeb_book.toc:
if tocitem.href.find('#') != -1:
@@ -147,23 +147,23 @@ class SNBOutput(OutputFormatPlugin):
outputFiles[item[0]].append((item[1], tocitem.title))
else:
outputFiles[item[0]] = []
- if "" not in outputFiles[item[0]]:
- outputFiles[item[0]].append(("", tocitem.title + _(" (Preface)")))
- ch = etree.SubElement(tocBody, "chapter")
- ch.set("src", ProcessFileName(item[0]) + ".snbc")
- ch.text = tocitem.title + _(" (Preface)")
+ if '' not in outputFiles[item[0]]:
+ outputFiles[item[0]].append(('', tocitem.title + _(' (Preface)')))
+ ch = etree.SubElement(tocBody, 'chapter')
+ ch.set('src', ProcessFileName(item[0]) + '.snbc')
+ ch.text = tocitem.title + _(' (Preface)')
outputFiles[item[0]].append((item[1], tocitem.title))
else:
if tocitem.href in outputFiles:
- outputFiles[tocitem.href].append(("", tocitem.title))
+ outputFiles[tocitem.href].append(('', tocitem.title))
else:
outputFiles[tocitem.href] = []
- outputFiles[tocitem.href].append(("", tocitem.title))
- ch = etree.SubElement(tocBody, "chapter")
- ch.set("src", ProcessFileName(tocitem.href) + ".snbc")
+ outputFiles[tocitem.href].append(('', tocitem.title))
+ ch = etree.SubElement(tocBody, 'chapter')
+ ch.set('src', ProcessFileName(tocitem.href) + '.snbc')
ch.text = tocitem.title
- etree.SubElement(tocHead, "chapters").text = '%d' % len(tocBody)
+ etree.SubElement(tocHead, 'chapters').text = '%d' % len(tocBody)
with open(os.path.join(snbfDir, 'toc.snbf'), 'wb') as f:
f.write(etree.tostring(tocInfoTree, pretty_print=True, encoding='utf-8'))
@@ -194,13 +194,13 @@ class SNBOutput(OutputFormatPlugin):
postfix = ''
if subName != '':
postfix = '_' + subName
- lastName = ProcessFileName(item.href + postfix + ".snbc")
+ lastName = ProcessFileName(item.href + postfix + '.snbc')
oldTree = snbcTrees[subName]
with open(os.path.join(snbcDir, lastName), 'wb') as f:
f.write(etree.tostring(oldTree, pretty_print=True, encoding='utf-8'))
else:
log.debug('Merge %s with last TOC item...' % item.href)
- snbwriter.merge_content(oldTree, oeb_book, item, [('', _("Start"))], opts)
+ snbwriter.merge_content(oldTree, oeb_book, item, [('', _('Start'))], opts)
# Output the last one if needed
log.debug('Output the last modified chapter again: %s' % lastName)
diff --git a/src/calibre/ebooks/conversion/plugins/txt_input.py b/src/calibre/ebooks/conversion/plugins/txt_input.py
index 048a02c316..c4d620720d 100644
--- a/src/calibre/ebooks/conversion/plugins/txt_input.py
+++ b/src/calibre/ebooks/conversion/plugins/txt_input.py
@@ -21,7 +21,7 @@ MD_EXTENSIONS = {
'meta': _('Metadata in the document'),
'nl2br': _('Treat newlines as hard breaks'),
'sane_lists': _('Do not allow mixing list types'),
- 'smarty': _('Use Markdown\'s internal smartypants parser'),
+ 'smarty': _("Use Markdown's internal smartypants parser"),
'tables': _('Support tables'),
'toc': _('Generate a table of contents'),
'wikilinks': _('Wiki style links'),
@@ -43,7 +43,7 @@ class TXTInput(InputFormatPlugin):
'single': _('Assume every line is a paragraph'),
'print': _('Assume every line starting with 2+ spaces or a tab starts a paragraph'),
'unformatted': _('Most lines have hard line breaks, few/no blank lines or indents'),
- 'off': _('Don\'t modify the paragraph structure'),
+ 'off': _("Don't modify the paragraph structure"),
},
'formatting_types': {
'auto': _('Automatically decide which formatting processor to use'),
@@ -83,7 +83,7 @@ class TXTInput(InputFormatPlugin):
OptionRecommendation(name='txt_in_remove_indents', recommended_value=False,
help=_('Normally extra space at the beginning of lines is retained. '
'With this option they will be removed.')),
- OptionRecommendation(name="markdown_extensions", recommended_value='footnotes, tables, toc',
+ OptionRecommendation(name='markdown_extensions', recommended_value='footnotes, tables, toc',
help=_('Enable extensions to Markdown syntax. Extensions are formatting that is not part '
'of the standard Markdown format. The extensions enabled by default: %default.\n'
'To learn more about Markdown extensions, see {}\n'
diff --git a/src/calibre/ebooks/conversion/plugins/txt_output.py b/src/calibre/ebooks/conversion/plugins/txt_output.py
index d90ff6d802..faea8f6b56 100644
--- a/src/calibre/ebooks/conversion/plugins/txt_output.py
+++ b/src/calibre/ebooks/conversion/plugins/txt_output.py
@@ -30,9 +30,9 @@ class TXTOutput(OutputFormatPlugin):
OptionRecommendation(name='newline', recommended_value='system',
level=OptionRecommendation.LOW,
short_switch='n', choices=NEWLINE_TYPES,
- help=_('Type of newline to use. Options are %s. Default is \'system\'. '
- 'Use \'old_mac\' for compatibility with Mac OS 9 and earlier. '
- 'For macOS use \'unix\'. \'system\' will default to the newline '
+ help=_("Type of newline to use. Options are %s. Default is 'system'. "
+ "Use 'old_mac' for compatibility with Mac OS 9 and earlier. "
+ "For macOS use 'unix'. 'system' will default to the newline "
'type used by this OS.') % sorted(NEWLINE_TYPES)),
OptionRecommendation(name='txt_output_encoding', recommended_value='utf-8',
level=OptionRecommendation.LOW,
diff --git a/src/calibre/ebooks/conversion/plumber.py b/src/calibre/ebooks/conversion/plumber.py
index 69dcdcf9ec..9f9699724c 100644
--- a/src/calibre/ebooks/conversion/plumber.py
+++ b/src/calibre/ebooks/conversion/plumber.py
@@ -187,7 +187,7 @@ OptionRecommendation(name='disable_font_rescaling',
OptionRecommendation(name='minimum_line_height',
recommended_value=120.0, level=OptionRecommendation.LOW,
help=_(
- 'The minimum line height, as a percentage of the element\'s '
+ "The minimum line height, as a percentage of the element's "
'calculated font size. calibre will ensure that every element '
'has a line height of at least this setting, irrespective of '
'what the input document specifies. Set to zero to disable. '
diff --git a/src/calibre/ebooks/conversion/preprocess.py b/src/calibre/ebooks/conversion/preprocess.py
index 83a8a9dadd..bd843c082b 100644
--- a/src/calibre/ebooks/conversion/preprocess.py
+++ b/src/calibre/ebooks/conversion/preprocess.py
@@ -200,8 +200,8 @@ class Dehyphenator:
"((ed)?ly|'?e?s||a?(t|s)?ion(s|al(ly)?)?|ings?|er|(i)?ous|"
"(i|a)ty|(it)?ies|ive|gence|istic(ally)?|(e|a)nce|m?ents?|ism|ated|"
"(e|u)ct(ed)?|ed|(i|ed)?ness|(e|a)ncy|ble|ier|al|ex|ian)$")
- self.suffixes = re.compile(r"^%s" % self.suffix_string, re.IGNORECASE)
- self.removesuffixes = re.compile(r"%s" % self.suffix_string, re.IGNORECASE)
+ self.suffixes = re.compile(r'^%s' % self.suffix_string, re.IGNORECASE)
+ self.removesuffixes = re.compile(r'%s' % self.suffix_string, re.IGNORECASE)
# remove prefixes if the prefix was not already the point of hyphenation
self.prefix_string = '^(dis|re|un|in|ex)'
self.prefixes = re.compile(r'%s$' % self.prefix_string, re.IGNORECASE)
@@ -214,7 +214,7 @@ class Dehyphenator:
wraptags = match.group('wraptags')
except:
wraptags = ''
- hyphenated = str(firsthalf) + "-" + str(secondhalf)
+ hyphenated = str(firsthalf) + '-' + str(secondhalf)
dehyphenated = str(firsthalf) + str(secondhalf)
if self.suffixes.match(secondhalf) is None:
lookupword = self.removesuffixes.sub('', dehyphenated)
@@ -223,7 +223,7 @@ class Dehyphenator:
if len(firsthalf) > 4 and self.prefixes.match(firsthalf) is None:
lookupword = self.removeprefix.sub('', lookupword)
if self.verbose > 2:
- self.log("lookup word is: "+lookupword+", orig is: " + hyphenated)
+ self.log('lookup word is: '+lookupword+', orig is: ' + hyphenated)
try:
searchresult = self.html.find(lookupword.lower())
except:
@@ -231,33 +231,33 @@ class Dehyphenator:
if self.format == 'html_cleanup' or self.format == 'txt_cleanup':
if self.html.find(lookupword) != -1 or searchresult != -1:
if self.verbose > 2:
- self.log(" Cleanup:returned dehyphenated word: " + dehyphenated)
+ self.log(' Cleanup:returned dehyphenated word: ' + dehyphenated)
return dehyphenated
elif self.html.find(hyphenated) != -1:
if self.verbose > 2:
- self.log(" Cleanup:returned hyphenated word: " + hyphenated)
+ self.log(' Cleanup:returned hyphenated word: ' + hyphenated)
return hyphenated
else:
if self.verbose > 2:
- self.log(" Cleanup:returning original text "+firsthalf+" + linefeed "+secondhalf)
+ self.log(' Cleanup:returning original text '+firsthalf+' + linefeed '+secondhalf)
return firsthalf+'\u2014'+wraptags+secondhalf
else:
if self.format == 'individual_words' and len(firsthalf) + len(secondhalf) <= 6:
if self.verbose > 2:
- self.log("too short, returned hyphenated word: " + hyphenated)
+ self.log('too short, returned hyphenated word: ' + hyphenated)
return hyphenated
if len(firsthalf) <= 2 and len(secondhalf) <= 2:
if self.verbose > 2:
- self.log("too short, returned hyphenated word: " + hyphenated)
+ self.log('too short, returned hyphenated word: ' + hyphenated)
return hyphenated
if self.html.find(lookupword) != -1 or searchresult != -1:
if self.verbose > 2:
- self.log(" returned dehyphenated word: " + dehyphenated)
+ self.log(' returned dehyphenated word: ' + dehyphenated)
return dehyphenated
else:
if self.verbose > 2:
- self.log(" returned hyphenated word: " + hyphenated)
+ self.log(' returned hyphenated word: ' + hyphenated)
return hyphenated
def __call__(self, html, format, length=1):
@@ -461,7 +461,7 @@ class HTMLPreProcessor:
return re.search('<]*id=BookTitle', raw) is not None
def is_pdftohtml(self, src):
-        return '<!-- created by calibre\'s pdftohtml -->' in src[:1000]
+        return "<!-- created by calibre's pdftohtml -->" in src[:1000]
def __call__(self, html, remove_special_chars=None,
get_preprocess_html=False):
@@ -617,7 +617,7 @@ class HTMLPreProcessor:
html = preprocessor(html)
if is_pdftohtml:
-            html = html.replace('<!-- created by calibre\'s pdftohtml -->', '')
+            html = html.replace("<!-- created by calibre's pdftohtml -->", '')
if getattr(self.extra_opts, 'smarten_punctuation', False):
html = smarten_punctuation(html, self.log)
diff --git a/src/calibre/ebooks/conversion/utils.py b/src/calibre/ebooks/conversion/utils.py
index 5c074a69f1..915ff89d05 100644
--- a/src/calibre/ebooks/conversion/utils.py
+++ b/src/calibre/ebooks/conversion/utils.py
@@ -33,16 +33,16 @@ class HeuristicProcessor:
self.multi_blank = re.compile(r'(\s*
]*>\s*
(\s*]*>\s*
\s*)*){2,}(?!\s*]*>\s*(\s*]*>\s*
\s*)*){2,}', re.IGNORECASE)
self.line_open = (
-            r"<(?P<outer>p|div)[^>]*>\s*(<(?P<inner1>font|span|[ibu])[^>]*>)?\s*"
-            r"(<(?P<inner2>font|span|[ibu])[^>]*>)?\s*(<(?P<inner3>font|span|[ibu])[^>]*>)?\s*")
-        self.line_close = "(</(?P=inner3)>)?\\s*(</(?P=inner2)>)?\\s*(</(?P=inner1)>)?\\s*</(?P=outer)>"
+            r'<(?P<outer>p|div)[^>]*>\s*(<(?P<inner1>font|span|[ibu])[^>]*>)?\s*'
+            r'(<(?P<inner2>font|span|[ibu])[^>]*>)?\s*(<(?P<inner3>font|span|[ibu])[^>]*>)?\s*')
+        self.line_close = '(</(?P=inner3)>)?\\s*(</(?P=inner2)>)?\\s*(</(?P=inner1)>)?\\s*</(?P=outer)>'
         self.single_blank = re.compile(r'(\s*<(p|div)[^>]*>\s*</(p|div)>)', re.IGNORECASE)
self.scene_break_open = ''
- self.common_in_text_endings = '[\"\'—’”,\\.!\\?\\…\\)„\\w]'
- self.common_in_text_beginnings = '[\\w\'\"“‘‛]'
+ self.common_in_text_endings = '["\'—’”,\\.!\\?\\…\\)„\\w]'
+ self.common_in_text_beginnings = '[\\w\'"“‘‛]'
def is_pdftohtml(self, src):
-        return '<!-- created by calibre\'s pdftohtml -->' in src[:1000]
+        return "<!-- created by calibre's pdftohtml -->" in src[:1000]
def is_abbyy(self, src):
return ''+chap+'\n'
else:
             delete_whitespace = re.compile('^\\s*(?P<text>.*?)\\s*$')
- delete_quotes = re.compile('\'\"')
+ delete_quotes = re.compile('\'"')
             txt_chap = delete_quotes.sub('', delete_whitespace.sub('\\g<text>', html2text(chap)))
             txt_title = delete_quotes.sub('', delete_whitespace.sub('\\g<text>', html2text(title)))
self.html_preprocess_sections = self.html_preprocess_sections + 1
- self.log.debug("marked " + str(self.html_preprocess_sections) +
- " chapters & titles. - " + str(chap) + ", " + str(title))
+ self.log.debug('marked ' + str(self.html_preprocess_sections) +
+ ' chapters & titles. - ' + str(chap) + ', ' + str(title))
             return '<h2>'+chap+'</h2>\n<h3>'+title+'</h3>\n'
def chapter_break(self, match):
chap = match.group('section')
styles = match.group('styles')
self.html_preprocess_sections = self.html_preprocess_sections + 1
- self.log.debug("marked " + str(self.html_preprocess_sections) +
- " section markers based on punctuation. - " + str(chap))
+ self.log.debug('marked ' + str(self.html_preprocess_sections) +
+ ' section markers based on punctuation. - ' + str(chap))
return '<'+styles+' style="page-break-before:always">'+chap
def analyze_title_matches(self, match):
@@ -208,59 +208,59 @@ class HeuristicProcessor:
if wordcount > 200000:
typical_chapters = 15000.
self.min_chapters = int(ceil(wordcount / typical_chapters))
- self.log.debug("minimum chapters required are: "+str(self.min_chapters))
+ self.log.debug('minimum chapters required are: '+str(self.min_chapters))
         heading = re.compile('<h[1-3][^>]*>', re.IGNORECASE)
self.html_preprocess_sections = len(heading.findall(html))
- self.log.debug("found " + str(self.html_preprocess_sections) + " pre-existing headings")
+ self.log.debug('found ' + str(self.html_preprocess_sections) + ' pre-existing headings')
# Build the Regular Expressions in pieces
- init_lookahead = "(?=<(p|div))"
+ init_lookahead = '(?=<(p|div))'
chapter_line_open = self.line_open
-        title_line_open = (r"<(?P<outer2>p|div)[^>]*>\s*(<(?P<inner4>font|span|[ibu])[^>]*>)?"
-                           r"\s*(<(?P<inner5>font|span|[ibu])[^>]*>)?\s*(<(?P<inner6>font|span|[ibu])[^>]*>)?\s*")
-        chapter_header_open = r"(?P<chap>"
-        title_header_open = r"(?P<title>"
- chapter_header_close = ")\\s*"
- title_header_close = ")"
+        title_line_open = (r'<(?P<outer2>p|div)[^>]*>\s*(<(?P<inner4>font|span|[ibu])[^>]*>)?'
+                           r'\s*(<(?P<inner5>font|span|[ibu])[^>]*>)?\s*(<(?P<inner6>font|span|[ibu])[^>]*>)?\s*')
+        chapter_header_open = r'(?P<chap>'
+        title_header_open = r'(?P<title>'
+ chapter_header_close = ')\\s*'
+ title_header_close = ')'
chapter_line_close = self.line_close
-        title_line_close = "(</(?P=inner6)>)?\\s*(</(?P=inner5)>)?\\s*(</(?P=inner4)>)?\\s*</(?P=outer2)>"
+        title_line_close = '(</(?P=inner6)>)?\\s*(</(?P=inner5)>)?\\s*(</(?P=inner4)>)?\\s*</(?P=outer2)>'
is_pdftohtml = self.is_pdftohtml(html)
if is_pdftohtml:
-            title_line_open = "<(?P<outer2>p)[^>]*>\\s*"
-            title_line_close = "\\s*</(?P=outer2)>"
+            title_line_open = '<(?P<outer2>p)[^>]*>\\s*'
+            title_line_close = '\\s*</(?P=outer2)>'
if blanks_between_paragraphs:
-            blank_lines = "(\\s*<p[^>]*>\\s*</p>){0,2}\\s*"
+            blank_lines = '(\\s*<p[^>]*>\\s*</p>){0,2}\\s*'
else:
- blank_lines = ""
- opt_title_open = "("
- opt_title_close = ")?"
- n_lookahead_open = "(?!\\s*"
- n_lookahead_close = ")\\s*"
+ blank_lines = ''
+ opt_title_open = '('
+ opt_title_close = ')?'
+ n_lookahead_open = '(?!\\s*'
+ n_lookahead_close = ')\\s*'
default_title = r"(<[ibu][^>]*>)?\s{0,3}(?!Chapter)([\w\:\'’\"-]+\s{0,3}){1,5}?([ibu][^>]*>)?(?=<)"
- simple_title = r"(<[ibu][^>]*>)?\s{0,3}(?!(Chapter|\s+<)).{0,65}?([ibu][^>]*>)?(?=<)"
+ simple_title = r'(<[ibu][^>]*>)?\s{0,3}(?!(Chapter|\s+<)).{0,65}?([ibu][^>]*>)?(?=<)'
analysis_result = []
chapter_types = [
[(
r"[^'\"]?(Introduction|Synopsis|Acknowledgements|Epilogue|CHAPTER|Kapitel|Volume\b|Prologue|Book\b|Part\b|Dedication|Preface)"
- r"\s*([\d\w-]+\:?\'?\s*){0,5}"), True, True, True, False, "Searching for common section headings", 'common'],
+ r"\s*([\d\w-]+\:?\'?\s*){0,5}"), True, True, True, False, 'Searching for common section headings', 'common'],
# Highest frequency headings which include titles
- [r"[^'\"]?(CHAPTER|Kapitel)\s*([\dA-Z\-\'\"\?!#,]+\s*){0,7}\s*", True, True, True, False, "Searching for most common chapter headings", 'chapter'],
- [r"]*>\s*(]*>)?\s*(?!([*#•=]+\s*)+)(\s*(?=[\d.\w#\-*\s]+<)([\d.\w#-*]+\s*){1,5}\s*)(?!\.)()?\s*",
- True, True, True, False, "Searching for emphasized lines", 'emphasized'], # Emphasized lines
+ [r"[^'\"]?(CHAPTER|Kapitel)\s*([\dA-Z\-\'\"\?!#,]+\s*){0,7}\s*", True, True, True, False, 'Searching for most common chapter headings', 'chapter'],
+ [r']*>\s*(]*>)?\s*(?!([*#•=]+\s*)+)(\s*(?=[\d.\w#\-*\s]+<)([\d.\w#-*]+\s*){1,5}\s*)(?!\.)()?\s*',
+ True, True, True, False, 'Searching for emphasized lines', 'emphasized'], # Emphasized lines
[r"[^'\"]?(\d+(\.|:))\s*([\w\-\'\"#,]+\s*){0,7}\s*", True, True, True, False,
- "Searching for numeric chapter headings", 'numeric'], # Numeric Chapters
- [r"([A-Z]\s+){3,}\s*([\d\w-]+\s*){0,3}\s*", True, True, True, False, "Searching for letter spaced headings", 'letter_spaced'], # Spaced Lettering
+ 'Searching for numeric chapter headings', 'numeric'], # Numeric Chapters
+ [r'([A-Z]\s+){3,}\s*([\d\w-]+\s*){0,3}\s*', True, True, True, False, 'Searching for letter spaced headings', 'letter_spaced'], # Spaced Lettering
[r"[^'\"]?(\d+\.?\s+([\d\w-]+\:?\'?-?\s?){0,5})\s*", True, True, True, False,
- "Searching for numeric chapters with titles", 'numeric_title'], # Numeric Titles
+ 'Searching for numeric chapters with titles', 'numeric_title'], # Numeric Titles
[r"[^'\"]?(\d+)\s*([\dA-Z\-\'\"\?!#,]+\s*){0,7}\s*", True, True, True, False,
- "Searching for simple numeric headings", 'plain_number'], # Numeric Chapters, no dot or colon
+ 'Searching for simple numeric headings', 'plain_number'], # Numeric Chapters, no dot or colon
[r"\s*[^'\"]?([A-Z#]+(\s|-){0,3}){1,5}\s*", False, True, False, False,
- "Searching for chapters with Uppercase Characters", 'uppercase'] # Uppercase Chapters
+ 'Searching for chapters with Uppercase Characters', 'uppercase'] # Uppercase Chapters
]
def recurse_patterns(html, analyze):
@@ -299,9 +299,9 @@ class HeuristicProcessor:
break
full_chapter_line = chapter_line_open+chapter_header_open+chapter_type+chapter_header_close+chapter_line_close
if n_lookahead_req:
-                n_lookahead = re.sub("(ou|in|cha)", "lookahead_", full_chapter_line)
+                n_lookahead = re.sub('(ou|in|cha)', 'lookahead_', full_chapter_line)
+ n_lookahead = re.sub('(ou|in|cha)', 'lookahead_', full_chapter_line)
if not analyze:
- self.log.debug("Marked " + str(self.html_preprocess_sections) + " headings, " + log_message)
+ self.log.debug('Marked ' + str(self.html_preprocess_sections) + ' headings, ' + log_message)
chapter_marker = arg_ignorecase+init_lookahead+full_chapter_line+blank_lines+lp_n_lookahead_open+n_lookahead+lp_n_lookahead_close+ \
lp_opt_title_open+title_line_open+title_header_open+lp_title+title_header_close+title_line_close+lp_opt_title_close
@@ -315,10 +315,10 @@ class HeuristicProcessor:
title_req = True
strict_title = False
self.log.debug(
- str(type_name)+" had "+str(hits)+
- " hits - "+str(self.chapters_no_title)+" chapters with no title, "+
- str(self.chapters_with_title)+" chapters with titles, "+
- str(float(self.chapters_with_title) / float(hits))+" percent. ")
+ str(type_name)+' had '+str(hits)+
+ ' hits - '+str(self.chapters_no_title)+' chapters with no title, '+
+ str(self.chapters_with_title)+' chapters with titles, '+
+ str(float(self.chapters_with_title) / float(hits))+' percent. ')
if type_name == 'common':
analysis_result.append([chapter_type, n_lookahead_req, strict_title, ignorecase, title_req, log_message, type_name])
elif self.min_chapters <= hits < max_chapters or self.min_chapters < 3 > hits:
@@ -335,8 +335,8 @@ class HeuristicProcessor:
words_per_chptr = wordcount
if words_per_chptr > 0 and self.html_preprocess_sections > 0:
words_per_chptr = wordcount // self.html_preprocess_sections
- self.log.debug("Total wordcount is: "+ str(wordcount)+", Average words per section is: "+
- str(words_per_chptr)+", Marked up "+str(self.html_preprocess_sections)+" chapters")
+ self.log.debug('Total wordcount is: '+ str(wordcount)+', Average words per section is: '+
+ str(words_per_chptr)+', Marked up '+str(self.html_preprocess_sections)+' chapters')
return html
def punctuation_unwrap(self, length, content, format):
@@ -366,13 +366,13 @@ class HeuristicProcessor:
# define the pieces of the regex
# (?(span|[iub])>)?\\s*((p|div)>)?"
- blanklines = "\\s*(?P<(p|span|div)[^>]*>\\s*(<(p|span|div)[^>]*>\\s*(span|p|div)>\\s*)(span|p|div)>\\s*){0,3}\\s*"
- line_opening = "<(p|div)[^>]*>\\s*(?P<(span|[iub])[^>]*>)?\\s*"
- txt_line_wrap = "((\u0020|\u0009)*\n){1,4}"
+ lookahead = '(?<=.{'+str(length)+r'}([a-zა-ჰäëïöüàèìòùáćéíĺóŕńśúýźâêîôûçąężłıãõñæøþðßěľščťžňďřůёђєіїјљњћўџѣа-я,:)\\IAß]|(?(span|[iub])>)?\\s*((p|div)>)?'
+ blanklines = '\\s*(?P<(p|span|div)[^>]*>\\s*(<(p|span|div)[^>]*>\\s*(span|p|div)>\\s*)(span|p|div)>\\s*){0,3}\\s*'
+ line_opening = '<(p|div)[^>]*>\\s*(?P<(span|[iub])[^>]*>)?\\s*'
+ txt_line_wrap = '((\u0020|\u0009)*\n){1,4}'
if format == 'txt':
unwrap_regex = lookahead+txt_line_wrap
@@ -383,9 +383,9 @@ class HeuristicProcessor:
em_en_unwrap_regex = em_en_lookahead+line_ending+blanklines+line_opening
shy_unwrap_regex = soft_hyphen+line_ending+blanklines+line_opening
- unwrap = re.compile("%s" % unwrap_regex, re.UNICODE)
- em_en_unwrap = re.compile("%s" % em_en_unwrap_regex, re.UNICODE)
- shy_unwrap = re.compile("%s" % shy_unwrap_regex, re.UNICODE)
+ unwrap = re.compile('%s' % unwrap_regex, re.UNICODE)
+ em_en_unwrap = re.compile('%s' % em_en_unwrap_regex, re.UNICODE)
+ shy_unwrap = re.compile('%s' % shy_unwrap_regex, re.UNICODE)
if format == 'txt':
content = unwrap.sub(' ', content)
@@ -408,7 +408,7 @@ class HeuristicProcessor:
def markup_pre(self, html):
         pre = re.compile(r'<pre>', re.IGNORECASE)
if len(pre.findall(html)) >= 1:
- self.log.debug("Running Text Processing")
+ self.log.debug('Running Text Processing')
             outerhtml = re.compile(r'.*?(?<=<pre>)(?P<text>.*?)</pre>', re.IGNORECASE|re.DOTALL)
html = outerhtml.sub(self.txt_process, html)
from calibre.ebooks.conversion.preprocess import convert_entities
@@ -422,15 +422,15 @@ class HeuristicProcessor:
return html
def arrange_htm_line_endings(self, html):
-        html = re.sub(r"\s*</(?P<tag>p|div)>", "</"+"\\g<tag>"+">\n", html)
- html = re.sub(r"\s*<(?Pp|div)(?P