Content server: Allow managing the data files associated with a book by clicking the three dots in the top right corner of the book's page and choosing "Manage data files". Fixes #2091646 [Download the Data Files from the web browser](https://bugs.launchpad.net/calibre/+bug/2091646)

Kovid Goyal 2024-12-18 11:42:10 +05:30
parent 89e88b9678
commit 0177afafe6
6 changed files with 338 additions and 6 deletions
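Before the diffs, a quick orientation: the commit adds three content-server endpoints under /data-files/ (get, upload, remove) plus the web-client UI that drives them. The sketch below is illustrative only: it assumes a content server at http://localhost:8080 that allows writes without authentication, a book with id 123, the default library, a data file stored under the conventional data/ directory (DATA_DIR_NAME), and the third-party requests package; none of these are part of this commit. The payload shapes are read directly from the handlers added further down.

# Hypothetical client for the new /data-files/* endpoints (sketch only).
import requests

SERVER = 'http://localhost:8080'
book_id = 123

# Download one data file. The relpath is percent-encoded (the web client uses
# encodeURIComponent) so the slash inside 'data/notes.txt' survives routing.
r = requests.get(f'{SERVER}/data-files/get/{book_id}/data%2Fnotes.txt',
                 params={'content_disposition': 'attachment'})
r.raise_for_status()
open('notes.txt', 'wb').write(r.content)

# Permanently remove data files. The body is a JSON list of relpaths; the
# response lists the remaining files plus any per-file error strings.
r = requests.post(f'{SERVER}/data-files/remove/{book_id}', json=['data/notes.txt'])
print(r.json())  # e.g. {'data_files': {...}, 'errors': {...}}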

@@ -2058,6 +2058,33 @@ class DB:
                with src:
                    yield relpath, src, stat_result

    def remove_extra_files(self, book_path, relpaths, permanent):
        bookdir = os.path.join(self.library_path, book_path)
        errors = {}
        for relpath in relpaths:
            path = os.path.abspath(os.path.join(bookdir, relpath))
            if not self.normpath(path).startswith(self.normpath(bookdir)):
                continue
            try:
                if permanent:
                    try:
                        os.remove(make_long_path_useable(path))
                    except FileNotFoundError:
                        pass
                    except Exception:
                        if not iswindows:
                            raise
                        time.sleep(1)
                        os.remove(make_long_path_useable(path))
                else:
                    from calibre.utils.recycle_bin import recycle
                    recycle(make_long_path_useable(path))
            except Exception as e:
                import traceback
                traceback.print_exc()
                errors[relpath] = e
        return errors

    def rename_extra_file(self, relpath, newrelpath, book_path, replace=True):
        bookdir = os.path.join(self.library_path, book_path)
        src = os.path.abspath(os.path.join(bookdir, relpath))

@@ -2077,6 +2104,8 @@ class DB:
    def add_extra_file(self, relpath, stream, book_path, replace=True, auto_rename=False):
        bookdir = os.path.join(self.library_path, book_path)
        dest = os.path.abspath(os.path.join(bookdir, relpath))
        if not self.normpath(dest).startswith(self.normpath(bookdir)):
            return None
        if not replace and os.path.exists(make_long_path_useable(dest)):
            if not auto_rename:
                return None
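Both new backend code paths guard against path traversal with the same idiom: resolve the candidate path, then require that its normalized form starts with the normalized book directory. A standalone illustration of that check, not calibre code; os.path.normcase/normpath stand in for DB.normpath, whose definition is not shown in this diff:

import os

def is_inside(bookdir: str, relpath: str) -> bool:
    # Resolve the candidate and compare normalized prefixes, mirroring the
    # startswith() check in remove_extra_files()/add_extra_file() above.
    norm = lambda p: os.path.normcase(os.path.normpath(p))
    candidate = os.path.abspath(os.path.join(bookdir, relpath))
    return norm(candidate).startswith(norm(os.path.abspath(bookdir)))

print(is_inside('/library/Author/Book (1)', 'data/notes.txt'))    # True
print(is_inside('/library/Author/Book (1)', '../../etc/passwd'))  # False

A stricter variant would compare against the prefix plus os.sep, so that a sibling directory sharing a name prefix could not slip through.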

@@ -20,7 +20,7 @@ from io import DEFAULT_BUFFER_SIZE, BytesIO
from queue import Queue
from threading import Lock
from time import mktime, monotonic, sleep, time
from typing import NamedTuple, Optional, Tuple
from typing import Iterable, NamedTuple, Optional, Tuple

from calibre import as_unicode, detect_ncpus, isbytestring
from calibre.constants import iswindows, preferred_encoding
@@ -3357,6 +3357,17 @@ class Cache:
        self._clear_extra_files_cache(dest_id)
        return added

    @write_api
    def remove_extra_files(self, book_id: int, relpaths: Iterable[str], permanent=False) -> dict[str, Exception | None]:
        '''
        Delete the specified extra files, either by moving them to the Recycle Bin or permanently.
        '''
        path = self._field_for('path', book_id)
        if path:
            self._clear_extra_files_cache(book_id)
            return self.backend.remove_extra_files(path, relpaths, permanent)
        return dict.fromkeys(relpaths)

    @read_api
    def list_extra_files(self, book_id, use_cache=False, pattern='') -> Tuple[ExtraFile, ...]:
        '''
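The new Cache.remove_extra_files() rounds out the existing extra-files API (list_extra_files(), add_extra_files()): it returns a dict keyed by relpath whose values are the exceptions for files that could not be deleted, or None on success. A usage sketch, assuming the calibre.library.db() entry point and a script run inside calibre's environment (for example via calibre-debug); neither is part of this diff:

# Permanently remove one data file from book 123 via the db API (sketch).
from calibre.library import db

cache = db('/path/to/library').new_api
errors = cache.remove_extra_files(123, ('data/old-notes.txt',), permanent=True)
for relpath, exc in errors.items():
    if exc is not None:
        print('Could not delete', relpath, '->', exc)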

@@ -16,7 +16,7 @@ from threading import Lock

from calibre import fit_image, guess_type, sanitize_file_name
from calibre.constants import config_dir, iswindows
from calibre.db.constants import RESOURCE_URL_SCHEME
from calibre.db.constants import DATA_DIR_NAME, DATA_FILE_PATTERN, RESOURCE_URL_SCHEME
from calibre.db.errors import NoSuchFormat
from calibre.ebooks.covers import cprefs, generate_cover, override_prefs, scale_cover, set_use_roman
from calibre.ebooks.metadata import authors_to_string
@@ -24,6 +24,7 @@ from calibre.ebooks.metadata.meta import set_metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.library.save_to_disk import find_plugboard
from calibre.srv.errors import BookNotFound, HTTPBadRequest, HTTPNotFound
from calibre.srv.metadata import encode_stat_result
from calibre.srv.routes import endpoint, json
from calibre.srv.utils import get_db, get_use_roman, http_date
from calibre.utils.config_base import tweaks
@@ -34,7 +35,8 @@ from calibre.utils.localization import _
from calibre.utils.resources import get_image_path as I
from calibre.utils.resources import get_path as P
from calibre.utils.shared_file import share_open
from polyglot.binary import as_hex_unicode
from calibre.utils.speedups import ReadOnlyFileBuffer
from polyglot.binary import as_hex_unicode, from_base64_bytes
from polyglot.urllib import quote

plugboard_content_server_value = 'content_server'
@@ -518,3 +520,72 @@ def set_note(ctx, rd, field, item_id, library_id):
    db.set_notes_for(field, item_id, db_html, searchable_text, resources)
    rd.outheaders['Content-Type'] = 'text/html; charset=UTF-8'
    return srv_html


def data_file(rd, fname, path, stat_result):
    cd = rd.query.get('content_disposition', 'attachment')
    rd.outheaders['Content-Disposition'] = '''{}; filename="{}"; filename*=utf-8''{}'''.format(
        cd, fname_for_content_disposition(fname), fname_for_content_disposition(fname, as_encoded_unicode=True))
    return rd.filesystem_file_with_custom_etag(share_open(path, 'rb'), stat_result.st_dev, stat_result.st_ino, stat_result.st_size, stat_result.st_mtime)


@endpoint('/data-files/get/{book_id}/{relpath}/{library_id=None}', types={'book_id': int})
def get_data_file(ctx, rd, book_id, relpath, library_id):
    db = get_db(ctx, rd, library_id)
    if db is None:
        raise HTTPNotFound(f'Library {library_id} not found')
    for ef in db.list_extra_files(book_id, pattern=DATA_FILE_PATTERN):
        if ef.relpath == relpath:
            return data_file(rd, relpath.rpartition('/')[2], ef.file_path, ef.stat_result)
    raise HTTPNotFound(f'No data file {relpath} in book {book_id} in library {library_id}')


def strerr(e: Exception):
    # Don't leak the file path in the error response
    if isinstance(e, OSError):
        return e.strerror or str(e)
    return str(e)


@endpoint('/data-files/upload/{book_id}/{library_id=None}', needs_db_write=True, methods={'POST'}, types={'book_id': int}, postprocess=json)
def upload_data_files(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if db is None:
        raise HTTPNotFound(f'Library {library_id} not found')
    files = {}
    try:
        recvd = load_json_file(rd.request_body_file)
        for x in recvd:
            data = from_base64_bytes(x['data_url'].split(',', 1)[-1])
            relpath = f'{DATA_DIR_NAME}/{x["name"]}'
            files[relpath] = ReadOnlyFileBuffer(data, x['name'])
    except Exception as err:
        raise HTTPBadRequest(f'Invalid query: {err}')
    err = ''
    try:
        db.add_extra_files(book_id, files)
    except Exception as e:
        err = strerr(e)
    data_files = db.list_extra_files(book_id, use_cache=False, pattern=DATA_FILE_PATTERN)
    return {'error': err, 'data_files': {e.relpath: encode_stat_result(e.stat_result) for e in data_files}}


@endpoint('/data-files/remove/{book_id}/{library_id=None}', needs_db_write=True, methods={'POST'}, types={'book_id': int}, postprocess=json)
def remove_data_files(ctx, rd, book_id, library_id):
    db = get_db(ctx, rd, library_id)
    if db is None:
        raise HTTPNotFound(f'Library {library_id} not found')
    try:
        relpaths = load_json_file(rd.request_body_file)
        if not isinstance(relpaths, list):
            raise Exception('files to remove must be a list')
    except Exception as err:
        raise HTTPBadRequest(f'Invalid query: {err}')
    errors = db.remove_extra_files(book_id, relpaths, permanent=True)
    data_files = db.list_extra_files(book_id, use_cache=False, pattern=DATA_FILE_PATTERN)
    ans = {'data_files': {e.relpath: encode_stat_result(e.stat_result) for e in data_files}}
    if errors:
        ans['errors'] = {k: strerr(v) for k, v in errors.items() if v is not None}
    return ans
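The least obvious part of the upload handler above is the payload: each entry's data_url is split at the first comma and the remainder is base64-decoded, and the file is stored under DATA_DIR_NAME plus the supplied name (any other keys the web client sends, such as size and type, are ignored by the server). A standard-library sketch of building such a body; the file paths and MIME type are placeholders:

import base64
import json

def upload_body(*paths):
    # One object per file; only 'name' and 'data_url' are read by upload_data_files().
    entries = []
    for p in paths:
        with open(p, 'rb') as f:
            b64 = base64.b64encode(f.read()).decode('ascii')
        entries.append({
            'name': p.rsplit('/', 1)[-1],
            'data_url': 'data:application/octet-stream;base64,' + b64,
        })
    return json.dumps(entries)

# POST the returned string as the JSON body to /data-files/upload/{book_id}/{library_id}.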

@@ -11,6 +11,7 @@ from threading import Lock

from calibre.constants import config_dir
from calibre.db.categories import Tag, category_display_order
from calibre.db.constants import DATA_FILE_PATTERN
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
from calibre.library.comments import comments_to_html, markdown
from calibre.library.field_metadata import category_icon_map
@@ -64,6 +65,12 @@ def add_field(field, db, book_id, ans, field_metadata):
        ans[field] = val


def encode_stat_result(s: os.stat_result) -> dict[str, int]:
    return {
        'size': s.st_size, 'mtime_ns': s.st_mtime_ns,
    }


def book_as_json(db, book_id):
    db = db.new_api
    with db.safe_read_lock:
@@ -94,6 +101,9 @@ def book_as_json(db, book_id):
        x = db.items_with_notes_in_book(book_id)
        if x:
            ans['items_with_notes'] = {field: {v: k for k, v in items.items()} for field, items in x.items()}
        data_files = db.list_extra_files(book_id, use_cache=True, pattern=DATA_FILE_PATTERN)
        if data_files:
            ans['data_files'] = {e.relpath: encode_stat_result(e.stat_result) for e in data_files}
    return ans
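With these changes the book metadata JSON sent to the web client grows a data_files map, keyed by relpath and valued with the trimmed stat produced by encode_stat_result(). An illustrative fragment (values invented):

book_json_fragment = {
    'data_files': {
        'data/errata.pdf': {'size': 48213, 'mtime_ns': 1734504130123456789},
        'data/notes.txt': {'size': 512, 'mtime_ns': 1734504131000000000},
    },
}

The web client keeps this map on its cached book metadata (mi.data_files) and refreshes it from the upload and remove responses shown in the next diff.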

@@ -8,11 +8,11 @@ import traceback
from ajax import ajax, ajax_send, encode_query_component
from book_list.delete_book import refresh_after_delete, start_delete_book
from book_list.globals import get_session_data
from book_list.item_list import create_item, create_item_list
from book_list.item_list import create_item, create_item_list, create_side_action
from book_list.library_data import (
    all_libraries, book_after, book_metadata, cover_url, current_library_id,
    current_virtual_library, download_url, library_data, load_status,
    set_book_metadata
    set_book_metadata, download_data_file_url
)
from book_list.router import back, home, open_book, report_a_load_failure, show_note
from book_list.theme import (
@@ -23,8 +23,9 @@ from book_list.ui import query_as_href, set_panel_handler, show_panel
from book_list.views import search_query_for
from date import format_date
from dom import add_extra_css, build_rule, clear, ensure_id, svgicon, unique_id
from file_uploads import update_status_widget, upload_files_widget, upload_status_widget
from gettext import gettext as _
from modals import create_custom_dialog, error_dialog, warning_dialog
from modals import create_custom_dialog, error_dialog, warning_dialog, question_dialog
from read_book.touch import (
    copy_touch, install_handlers, interpret_single_gesture, touch_id, update_touch
)
@@ -40,6 +41,7 @@ bd_counter = 0
CLASS_NAME = 'book-details-panel'
SEARCH_INTERNET_CLASS = 'book-details-search-internet'
COPY_TO_LIBRARY_CLASS = 'book-details-copy-to-library'
DATA_FILES_CLASS = 'book-details-data-files'
FORMAT_PRIORITIES = [
    'EPUB', 'AZW3', 'DOCX', 'LIT', 'MOBI', 'ODT', 'RTF', 'MD', 'MARKDOWN', 'TXT', 'PDF'
]
@@ -469,6 +471,9 @@ add_extra_css(def():
    sel = '.' + COPY_TO_LIBRARY_CLASS
    style += build_rule(sel, margin='1ex 1em')

    sel = '.' + DATA_FILES_CLASS
    style += build_rule(sel, margin='1ex 1em')

    return style
)
@@ -827,6 +832,11 @@ def create_more_actions_panel(container_id):
            action=def():
                show_subsequent_panel('copy_to_library', replace=True)
        ),
        create_item(_('Manage data files'), subtitle=_('Upload or download additional data files for this book'),
            action=def():
                show_subsequent_panel('data_files', replace=True)
        ),
    ]
    container.appendChild(E.div())
    create_item_list(container.lastChild, items)
@@ -849,6 +859,196 @@ def url_for(template, data):
    return template.format(title=eqc(data.title), author=eqc(data.author))


EXTENSIONS_TO_OPEN_IN_BROWSER = {
    'pdf': True,
    'jpeg': True,
    'jpg': True,
    'png': True,
    'gif': True,
    'webp': True,
    'txt': True,
    'html': True,
    'htm': True,
    'xhtml': True,
}


def download_data_file(book_id, relpath):
    ext = relpath.toLowerCase().rpartition('.')[2]
    open_inline = bool(EXTENSIONS_TO_OPEN_IN_BROWSER[ext])
    cd = 'inline' if open_inline else 'attachment'
    url = download_data_file_url(book_id, relpath, cd)
    if open_inline:
        window.open(url)
    else:
        window.location = url


def on_progress(container_id, book_id, loaded, total, xhr):
    container = document.getElementById(container_id)
    if container and total:
        update_status_widget(container, loaded, total)


def data_files_submitted(container_id, book_id, end_type, xhr, ev):
    if end_type is 'abort':
        back()
        return
    if end_type is not 'load':
        error_dialog(_('Failed to upload files to server'), _(
            'Uploading data files for book: {} failed.').format(book_id), xhr.error_html)
        return
    try:
        res = JSON.parse(xhr.responseText)
    except Exception as err:
        error_dialog(_('Could not upload data files for book'), _('Server returned an invalid response'), err.toString())
        return
    mi = book_metadata(book_id)
    if mi:
        mi.data_files = res.data_files
    if res.error:
        error_dialog(_('Could not upload data files for book'), _('There was an error: ') + res.error)
    else:
        back()


def submit_data_files(top_container_id, container_id, book_id, added):
    c = document.getElementById(container_id)
    clear(c)
    c.appendChild(E.div(style='margin: 1ex 1rem', _('Uploading changes to server, please wait...')))
    w = upload_status_widget()
    c.appendChild(w)
    ajax_send(
        f'data-files/upload/{book_id}/{current_library_id()}', added,
        data_files_submitted.bind(None, container_id, book_id), on_progress.bind(None, container_id, book_id))


def files_added(check_existing, top_container_id, book_id, container_id, files):
    container = document.getElementById(container_id)
    mi = book_metadata(book_id)
    if not container or not mi or not files[0]:
        return
    added = v'[]'

    def exists(fname):
        for relpath in mi.data_files:
            q = relpath.partition('/')[2]
            if q is fname:
                return True
        return False

    if check_existing:
        existing = [file.name for file in files if exists(file.name)]
        if existing.length:
            return question_dialog(_('Replace existing data files?'), _('The following data files already exist. Are you sure you want to replace them?') + ' ' + existing.join(', '), def (yes):
                if yes:
                    files_added(False, top_container_id, book_id, container_id, files)
                else:
                    back()
            )

    for file in files:
        data = {'name': file.name, 'size': file.size, 'type': file.type, 'data_url': None}
        added.push(data)
        r = FileReader()
        r.onload = def(evt):
            data.data_url = evt.target.result
            for entry in added:
                if not entry.data_url:
                    return
            submit_data_files(top_container_id, container_id, book_id, added)
        r.readAsDataURL(file)


def upload_data_file(container_id):
    if not render_book.book_id or not book_metadata(render_book.book_id):
        return return_to_book_details()
    book_id = render_book.book_id
    container = document.getElementById(container_id)
    create_top_bar(container, title=_('Upload data files'), action=back, icon='close')
    div = E.div(id=unique_id())
    container.appendChild(div)
    upload_files_widget(div, files_added.bind(None, True, container_id, book_id), _(
        'Upload files by <a>selecting the files</a> or by dragging and dropping them here.'),
        single_file=False)


def rebuild_data_files_list(container_id, book_id, relpath_being_deleted):

    def delete_data_file(relpath, fname, ev):
        question_dialog(_('Are you sure?'), _('Do you want to permanently delete the data file {0} from the book {1}?').format(
            fname, mi.title), def(yes):
                if yes:
                    data = v'[relpath]'
                    ajax_send(f'data-files/remove/{book_id}/{current_library_id()}',
                        data, data_file_deleted.bind(None, container_id, book_id, relpath))
                    rebuild_data_files_list(container_id, book_id, relpath)
        )

    def ddf(relpath, fname, ev):
        download_data_file(book_id, relpath)

    def upload_data_file(ev):
        show_subsequent_panel('upload_data_file', replace=False)

    container = document.getElementById(container_id)
    mi = book_metadata(book_id)
    if not container or not mi:
        return
    container = container.querySelector('[data-component=data-files-list-container]')
    clear(container)
    items = [create_item(_('Upload new data file'), icon='plus', action=upload_data_file)]
    if mi.data_files:
        fname_map = {relpath: relpath.partition('/')[2] for relpath in mi.data_files}
        df = sorted(Object.keys(fname_map), key=def(x): return fname_map[x].toLowerCase();)
        for relpath in df:
            fname = fname_map[relpath]
            being_deleted = relpath is relpath_being_deleted
            subtitle = None
            side_actions = []
            if being_deleted:
                subtitle = _('This file is being deleted')
            else:
                side_actions.push(create_side_action(
                    'trash', tooltip=_('Delete the file: {}').format(fname), action=delete_data_file.bind(None, relpath, fname)
                ))
            items.push(create_item(fname, icon='cloud-download', subtitle=subtitle, side_actions=side_actions, action=ddf.bind(None, relpath, fname)))
    create_item_list(container, items)


def data_file_deleted(container_id, book_id, relpath, end_type, xhr, ev):
    if end_type is 'abort':
        back()
        return
    if end_type is not 'load':
        error_dialog(_('Failed to delete data file from server'), _(
            'Deleting data file for book: {} failed.').format(book_id), xhr.error_html)
        return
    try:
        res = JSON.parse(xhr.responseText)
    except Exception as err:
        error_dialog(_('Could not delete data file for book'), _('Server returned an invalid response'), err.toString())
        return
    mi = book_metadata(book_id)
    if mi:
        mi.data_files = res.data_files
    if res.errors:
        error_dialog(_('Failed to delete data file'), _('Failed to delete data file: {}').format(relpath), res.errors[relpath])
    rebuild_data_files_list(container_id, book_id)


def data_files(container_id):
    if not render_book.book_id or not book_metadata(render_book.book_id):
        return return_to_book_details()
    book_id = render_book.book_id
    container = document.getElementById(container_id)
    create_top_bar(container, title=_('Manage data files'), action=back, icon='close')
    container.appendChild(E.div(class_=DATA_FILES_CLASS))
    container = container.lastChild
    container.appendChild(E.div(data_component='data-files-list-container'))
    rebuild_data_files_list(container_id, book_id)


def search_internet(container_id):
    if not render_book.book_id or not book_metadata(render_book.book_id):
        return return_to_book_details()
@@ -1013,3 +1213,5 @@ set_panel_handler('book_details', init)
set_panel_handler('book_details^more_actions', create_more_actions_panel)
set_panel_handler('book_details^search_internet', search_internet)
set_panel_handler('book_details^copy_to_library', copy_to_library)
set_panel_handler('book_details^data_files', data_files)
set_panel_handler('book_details^upload_data_file', upload_data_file)
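The inline-versus-attachment decision in download_data_file() above keys off the file extension only. For clarity, the same decision expressed in plain Python (the extension table is copied from EXTENSIONS_TO_OPEN_IN_BROWSER; this is an illustration, not calibre code):

OPEN_IN_BROWSER = {'pdf', 'jpeg', 'jpg', 'png', 'gif', 'webp', 'txt', 'html', 'htm', 'xhtml'}

def disposition_for(relpath: str) -> str:
    # Mirrors download_data_file(): open known viewable types in a new tab,
    # download everything else as an attachment.
    ext = relpath.lower().rpartition('.')[2]
    return 'inline' if ext in OPEN_IN_BROWSER else 'attachment'

print(disposition_for('data/manual.pdf'))   # inline
print(disposition_for('data/backup.zip'))   # attachment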

@@ -224,6 +224,15 @@ def download_url(book_id, fmt, content_disposition):
    return ans


def download_data_file_url(book_id, relpath, content_disposition):
    lid = current_library_id()
    rpath = encodeURIComponent(relpath)
    ans = absolute_path(f'data-files/get/{book_id}/{rpath}/{lid}')
    if content_disposition:
        ans += f'?content_disposition={content_disposition}'
    return ans


def book_metadata(book_id):
    return library_data.metadata[book_id]
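For reference, download_data_file_url() percent-encodes the relpath with encodeURIComponent (so the slash becomes %2F) and appends the library id plus an optional content_disposition query. A rough Python equivalent, assuming absolute_path() merely prefixes the server's base path; the default library id here is a placeholder, and urllib's quote(..., safe='') only approximates encodeURIComponent:

from urllib.parse import quote

def download_data_file_url(book_id, relpath, content_disposition, library_id='Calibre_Library', base=''):
    # Rough Python equivalent of the RapydScript helper above; 'base' stands in for absolute_path().
    rpath = quote(relpath, safe='')  # roughly encodeURIComponent: '/' -> %2F
    ans = f'{base}/data-files/get/{book_id}/{rpath}/{library_id}'
    if content_disposition:
        ans += f'?content_disposition={content_disposition}'
    return ans

print(download_data_file_url(123, 'data/notes.txt', 'inline'))
# /data-files/get/123/data%2Fnotes.txt/Calibre_Library?content_disposition=inline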