mirror of
https://github.com/kovidgoyal/calibre.git
synced 2026-02-01 08:33:30 -05:00
Start work on GitHub AI backend
This commit is contained in:
parent
c6cf94f0a3
commit
e8712f26f1
20
src/calibre/ai/github/__init__.py
Normal file
20
src/calibre/ai/github/__init__.py
Normal file
@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env python
|
||||
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
|
||||
|
||||
from calibre.customize import AIProviderPlugin
|
||||
|
||||
|
||||
class GitHubAI(AIProviderPlugin):
    """Plugin providing access to AI models hosted by GitHub Models."""

    name = 'GitHubAI'
    version = (1, 0, 0)
    description = _('AI services from GitHub, with access to many different AI models')
    author = 'Kovid Goyal'
    # The actual implementation lives in a module that supports live updates,
    # so fixes can ship without a new calibre release.
    builtin_live_module_name = 'calibre.ai.github.backend'

    @property
    def capabilities(self):
        # Imported lazily so merely loading the plugin stays cheap.
        from calibre.ai import AICapabilities
        return (
            AICapabilities.text_to_text | AICapabilities.text_to_image | AICapabilities.text_and_image_to_image |
            AICapabilities.embedding
        )
263
src/calibre/ai/github/backend.py
Normal file
263
src/calibre/ai/github/backend.py
Normal file
@ -0,0 +1,263 @@
|
||||
#!/usr/bin/env python
|
||||
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
|
||||
|
||||
import json
|
||||
import os
|
||||
from collections.abc import Iterable, Iterator
|
||||
from functools import lru_cache
|
||||
from typing import Any, NamedTuple
|
||||
from urllib.request import Request
|
||||
|
||||
from calibre.ai import AICapabilities, ChatMessage, ChatMessageType, ChatResponse, Citation, NoAPIKey, PromptBlocked, ResultBlocked, WebLink
|
||||
from calibre.ai.github import GitHubAI
|
||||
from calibre.ai.prefs import decode_secret, pref_for_provider
|
||||
from calibre.ai.utils import chat_with_error_handler, develop_text_chat, get_cached_resource, read_streaming_response
|
||||
from calibre.constants import cache_dir
|
||||
|
||||
module_version = 1 # needed for live updates
|
||||
MODELS_URL = 'https://models.github.ai/catalog/models'
|
||||
API_VERSION = '2022-11-28'
|
||||
|
||||
|
||||
def pref(key: str, defval: Any = None) -> Any:
    """Read a preference scoped to the GitHubAI provider."""
    return pref_for_provider(GitHubAI.name, key, defval)
|
||||
|
||||
def api_key() -> str:
    """The stored (encoded) GitHub personal access token, or a falsy value if unset."""
    return pref('api_key')
|
||||
|
||||
def is_ready_for_use() -> bool:
    """True when a personal access token has been configured."""
    return bool(api_key())
|
||||
|
||||
def decoded_api_key() -> str:
    """Return the decoded personal access token, raising NoAPIKey if none is configured."""
    encoded = api_key()
    if encoded:
        return decode_secret(encoded)
    raise NoAPIKey('Personal access token required for GitHub AI')
|
||||
|
||||
@lru_cache(2)
def headers() -> tuple[tuple[str, str], ...]:
    """Common HTTP headers for all GitHub AI API requests.

    Returns a 4-element tuple of (name, value) pairs; the original annotation
    ``tuple[tuple[str, str]]`` incorrectly declared a 1-tuple. Cached, so the
    cache must be cleared if the stored token changes.
    """
    # Named 'token' to avoid shadowing the module-level api_key() function.
    token = decoded_api_key()
    return (
        ('Authorization', f'Bearer {token}'),
        ('X-GitHub-Api-Version', API_VERSION),
        ('Accept', 'application/vnd.github+json'),
        ('Content-Type', 'application/json'),
    )
|
||||
|
||||
class Model(NamedTuple):
    # See https://ai.google.dev/api/models#Model
    # NOTE(review): the fields and from_dict() below follow Google's Gemini
    # models API schema — presumably copied from the Google backend as a
    # starting point. The GitHub Models catalog uses a different schema;
    # TODO: confirm and port before this backend goes live.
    name: str                     # human-readable display name
    id: str                       # unique model identifier
    slug: str                     # identifier used when building request URLs
    description: str
    version: str
    context_length: int           # maximum input tokens
    output_token_limit: int       # maximum output tokens
    capabilities: AICapabilities
    family: str                   # model family, e.g. 'gemini' or 'imagen'
    family_version: float
    name_parts: tuple[str, ...]   # id split on '-', used for capability heuristics
    thinking: bool                # whether the model supports reasoning/"thinking"

    @classmethod
    def from_dict(cls, x: dict[str, object]) -> 'Model':
        """Build a Model from a raw catalog entry (Gemini-style schema — see NOTE above)."""
        caps = AICapabilities.text_to_text
        mid = x['name']
        if 'embedContent' in x['supportedGenerationMethods']:
            caps |= AICapabilities.embedding
        # Derive family and version from the last path component of the id,
        # e.g. 'models/gemini-2.5-pro' -> family 'gemini', family_version 2.5
        family, family_version = '', 0
        name_parts = mid.rpartition('/')[-1].split('-')
        if len(name_parts) > 1:
            family, fv = name_parts[:2]
            try:
                family_version = float(fv)
            except Exception:
                # Second part is not numeric, so treat the family as unknown
                family = ''
        match family:
            case 'imagen':
                caps |= AICapabilities.text_to_image
            case 'gemini':
                if family_version >= 2.5:
                    caps |= AICapabilities.text_and_image_to_image
                if 'tts' in name_parts:
                    caps |= AICapabilities.tts
        return Model(
            name=x['displayName'], id=mid, description=x.get('description', ''), version=x['version'],
            context_length=int(x['inputTokenLimit']), output_token_limit=int(x['outputTokenLimit']),
            capabilities=caps, family=family, family_version=family_version, name_parts=tuple(name_parts),
            slug=mid, thinking=x.get('thinking', False)
        )
|
||||
|
||||
def parse_models_list(entries: dict[str, Any]) -> dict[str, Model]:
    """Parse the decoded models catalog JSON into a map of model id -> Model.

    The body indexes ``entries['models']``, so the argument is the whole
    decoded JSON object (a dict), not a list as the original annotation
    claimed — the annotation is corrected here to match the code.
    NOTE(review): GitHub's /catalog/models endpoint may return a bare JSON
    array rather than a {'models': [...]} wrapper — TODO confirm; the body
    follows the Google backend this was copied from.
    """
    ans: dict[str, Model] = {}
    for entry in entries['models']:
        m = Model.from_dict(entry)
        ans[m.id] = m
    return ans
|
||||
|
||||
@lru_cache(2)
def get_available_models() -> dict[str, 'Model']:
    """Map of model id -> Model for all models in the GitHub catalog.

    The raw catalog is cached on disk (via get_cached_resource) and the
    parsed result is cached in memory by lru_cache.
    """
    cache_loc = os.path.join(cache_dir(), 'github-ai', 'models-v1.json')
    data = get_cached_resource(cache_loc, MODELS_URL, headers=headers())
    return parse_models_list(json.loads(data))
|
||||
|
||||
def config_widget():
    """Create the Qt configuration widget for this provider (Qt imported lazily)."""
    from calibre.ai.github.config import ConfigWidget
    return ConfigWidget()
|
||||
|
||||
def save_settings(config_widget):
    """Persist settings from a widget previously created by config_widget()."""
    config_widget.save_settings()
|
||||
|
||||
def human_readable_model_name(model_id: str) -> str:
    """Return the display name for *model_id*, falling back to the id itself if unknown."""
    model = get_available_models().get(model_id)
    return model.name if model else model_id
|
||||
|
||||
def model_choice_for_text() -> Model:
    """Return the model to use for text chat based on the configured strategy.

    Reads the 'model_choice_strategy' preference — the key actually written
    by ConfigWidget.save_settings() — instead of the previous 'model_strategy',
    which was never written anywhere and so the user's choice was ignored.
    Falls back to the 'medium' choice for unknown values.
    """
    # NOTE(review): gemini_models() is not defined in this module — copied
    # from the Google AI backend; a GitHub equivalent mapping strategy names
    # ('low'/'medium'/'high') to models still needs to be written.
    m = gemini_models()
    return m.get(pref('model_choice_strategy', 'medium')) or m['medium']
|
||||
|
||||
def chat_request(data: dict[str, Any], model: Model, streaming: bool = True) -> Request:
    """Build a POST Request for a chat completion against *model*.

    NOTE(review): API_BASE_URL is not defined in this module, and the
    ':streamGenerateContent'/':generateContent' suffixes are Google Gemini
    API endpoints — this looks copied from the Google backend and not yet
    ported to the GitHub Models inference API. TODO confirm/port.
    """
    url = f'{API_BASE_URL}/{model.slug}'
    if streaming:
        # Server-sent-events stream of incremental responses
        url += ':streamGenerateContent?alt=sse'
    else:
        url += ':generateContent'
    return Request(url, data=json.dumps(data).encode('utf-8'), headers=dict(headers()), method='POST')
|
||||
|
||||
def thinking_budget(m: Model) -> int | None:
    """Return the thinking-token budget for *m* per the configured reasoning strategy.

    None means no thinkingBudget should be sent; -1 lets the model decide.
    """
    # https://ai.google.dev/gemini-api/docs/thinking#set-budget
    if not m.thinking:
        return None
    # Per-family (minimum, maximum) budget limits
    is_pro = 'pro' in m.name_parts
    if is_pro:
        lo, hi = 128, 32768
    elif 'lite' in m.name_parts:
        lo, hi = 512, 24576
    else:
        lo, hi = 0, 24576
    strategy = pref('reasoning_strategy', 'auto')
    if strategy == 'auto':
        return -1  # let the model pick its own budget
    if strategy == 'none':
        # Pro models cannot disable thinking entirely, so use their minimum
        return lo if is_pro else 0
    fractions = {'low': 0.2, 'medium': 0.5, 'high': 0.8}
    if strategy in fractions:
        return max(lo, int(fractions[strategy] * hi))
    return None
|
||||
|
||||
def for_assistant(self: ChatMessage) -> dict[str, Any]:
    """Convert a ChatMessage into a Gemini-style text content part."""
    return {'text': self.query}
|
||||
|
||||
def as_chat_responses(d: dict[str, Any], model: Model) -> Iterator[ChatResponse]:
    # See https://ai.google.dev/api/generate-content#generatecontentresponse
    # NOTE(review): parses a Gemini-style response and calls model.get_cost(),
    # block_reason() and result_block_reason(), none of which exist in this
    # module or on Model yet — WIP copied from the Google backend; needs
    # porting to GitHub's response format.
    if pf := d.get('promptFeedback'):
        if br := pf.get('blockReason'):
            # The prompt itself was rejected by safety filters
            yield ChatResponse(exception=PromptBlocked(block_reason(br)))
            return
    grounding_chunks, grounding_supports = [], []
    for c in d['candidates']:
        has_metadata = False
        cost, currency = 0, ''
        if fr := c.get('finishReason'):
            if fr == 'STOP':
                # Normal completion: usage/cost metadata is available
                has_metadata = True
                cost, currency = model.get_cost(d['usageMetadata'])
            else:
                yield ChatResponse(exception=ResultBlocked(result_block_reason(fr)))
                return
        content = c['content']
        if gm := c.get('groundingMetadata'):
            # Web-grounding data accumulates across streamed chunks
            grounding_chunks.extend(gm['groundingChunks'])
            grounding_supports.extend(gm['groundingSupports'])
        citations, web_links = [], []
        if has_metadata:
            for x in grounding_chunks:
                if w := x.get('web'):
                    web_links.append(WebLink(**w))
                else:
                    # Placeholder keeps indices aligned with groundingChunkIndices
                    web_links.append(WebLink())

            for s in grounding_supports:
                # Only cite supports whose chunks resolved to real web links
                if links := tuple(i for i in s['groundingChunkIndices'] if web_links[i]):
                    seg = s['segment']
                    citations.append(Citation(
                        links, start_offset=seg.get('startIndex', 0), end_offset=seg.get('endIndex', 0), text=seg.get('text', '')))
        role = ChatMessageType.user if 'user' == content.get('role') else ChatMessageType.assistant
        content_parts = []
        reasoning_parts = []
        reasoning_details = []
        for part in content['parts']:
            if text := part.get('text'):
                # Parts flagged 'thought' are model reasoning, not user-visible output
                (reasoning_parts if part.get('thought') else content_parts).append(text)
            if ts := part.get('thoughtSignature'):
                reasoning_details.append({'signature': ts})
        yield ChatResponse(
            type=role, content=''.join(content_parts), reasoning=''.join(reasoning_parts),
            reasoning_details=tuple(reasoning_details), has_metadata=has_metadata, model=model.id,
            cost=cost, plugin_name=GitHubAI.name, currency=currency, citations=citations, web_links=web_links,
        )
|
||||
|
||||
def text_chat_implementation(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    # See https://ai.google.dev/gemini-api/docs/text-generation
    # NOTE(review): the payload shape (generationConfig/thinkingConfig,
    # system_instruction, google_search tool) is the Gemini API — copied
    # from the Google backend, not yet ported to GitHub Models inference.
    if use_model:
        model = get_available_models()[use_model]
    else:
        model = model_choice_for_text()
    contents = []
    system_instructions = []
    for m in messages:
        # System messages go into system_instruction, everything else into contents
        d = system_instructions if m.type is ChatMessageType.system else contents
        d.append(for_assistant(m))
    data = {
        # See https://ai.google.dev/api/generate-content#v1beta.GenerationConfig
        'generationConfig': {
            'thinkingConfig': {
                'includeThoughts': True,
            },
        },
    }
    if (tb := thinking_budget(model)) is not None:
        data['generationConfig']['thinkingConfig']['thinkingBudget'] = tb
    if system_instructions:
        data['system_instruction'] = {'parts': system_instructions}
    if contents:
        data['contents'] = [{'parts': contents}]
    if pref('allow_web_searches', True):
        data['tools'] = [{'google_search': {}}]
    rq = chat_request(data, model)

    for datum in read_streaming_response(rq, GitHubAI.name):
        yield from as_chat_responses(datum, model)
|
||||
|
||||
def text_chat(messages: Iterable[ChatMessage], use_model: str = '') -> Iterator[ChatResponse]:
    """Public text-chat entry point; wraps the implementation with error handling."""
    yield from chat_with_error_handler(text_chat_implementation(messages, use_model))
|
||||
|
||||
def develop(use_model: str = '', msg: str = '') -> None:
    """Interactive development/testing helper, run via calibre-debug."""
    # calibre-debug -c 'from calibre.ai.github.backend import develop; develop()'
    # NOTE(review): gemini_models() is undefined in this module and the
    # 'models/' id prefix is Gemini-specific — copied from the Google backend.
    print('\n'.join(f'{k}:{m.id}' for k, m in gemini_models().items()))
    m = (ChatMessage(msg),) if msg else ()
    develop_text_chat(text_chat, ('models/' + use_model) if use_model else '', messages=m)
|
||||
|
||||
if __name__ == '__main__':
    # Manual testing entry point
    develop()
83
src/calibre/ai/github/config.py
Normal file
83
src/calibre/ai/github/config.py
Normal file
@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env python
|
||||
# License: GPLv3 Copyright: 2025, Kovid Goyal <kovid at kovidgoyal.net>
|
||||
|
||||
|
||||
from functools import partial
|
||||
|
||||
from qt.core import QComboBox, QFormLayout, QLabel, QLineEdit, QWidget
|
||||
|
||||
from calibre.ai.github import GitHubAI
|
||||
from calibre.ai.prefs import decode_secret, encode_secret, pref_for_provider, set_prefs_for_provider
|
||||
from calibre.ai.utils import configure
|
||||
from calibre.gui2 import error_dialog
|
||||
|
||||
pref = partial(pref_for_provider, GitHubAI.name)
|
||||
|
||||
|
||||
class ConfigWidget(QWidget):
    """Configuration UI for the GitHub AI provider: access token and model strategy."""

    def __init__(self, parent: QWidget | None = None):
        super().__init__(parent)
        l = QFormLayout(self)
        l.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.ExpandingFieldsGrow)
        la = QLabel('<p>'+_(
            'You have to create an account at {0}, then generate a <a href="{1}">Personal access token</a>'
            ' with the <code>models:read</code> permission.'
            ' After that, you can use the GitHub AI services a limited number of times a day for free.'
            ' For more extensive use, you will need to setup <a href="{2}">GitHub models billing</a>.'
        ).format(
            '<a href="https://github.com">GitHub</a>',
            'https://docs.github.com/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens',
            'https://docs.github.com/billing/concepts/product-billing/github-models',
        ))
        la.setWordWrap(True)
        la.setOpenExternalLinks(True)
        l.addRow(la)

        self.api_key_edit = a = QLineEdit(self)
        a.setPlaceholderText(_('A personal access token is required'))
        l.addRow(_('Access &token:'), a)
        if key := pref('api_key'):
            # Show the decoded token so the user can edit it
            a.setText(decode_secret(key))
        self.model_strategy = ms = QComboBox(self)
        l.addRow(_('Model &choice strategy:'), ms)
        ms.addItem(_('Cheap and fastest'), 'low')
        ms.addItem(_('Medium'), 'medium')
        ms.addItem(_('High quality, expensive and slower'), 'high')
        if strat := pref('model_choice_strategy', 'medium'):
            # findData() returns -1 when not found; fall back to the first entry
            ms.setCurrentIndex(max(0, ms.findData(strat)))
        ms.setToolTip('<p>' + _(
            'The model choice strategy controls how a model to query is chosen. Cheaper and faster models give lower'
            ' quality results.'
        ))

    @property
    def api_key(self) -> str:
        # Token as currently typed, stripped of surrounding whitespace
        return self.api_key_edit.text().strip()

    @property
    def model_choice_strategy(self) -> str:
        # One of 'low', 'medium' or 'high' (the combo box item data)
        return self.model_strategy.currentData()

    @property
    def settings(self) -> dict[str, str]:
        # Settings in their persisted form; the token is stored encoded
        return {
            'api_key': encode_secret(self.api_key), 'model_choice_strategy': self.model_choice_strategy,
        }

    @property
    def is_ready_for_use(self) -> bool:
        return bool(self.api_key)

    def validate(self) -> bool:
        """Show an error dialog and return False when no token has been entered."""
        if self.is_ready_for_use:
            return True
        error_dialog(self, _('No API key'), _('You must supply a Personal access token to use GitHub AI.'), show=True)
        return False

    def save_settings(self):
        """Persist the current settings for this provider."""
        set_prefs_for_provider(GitHubAI.name, self.settings)
|
||||
|
||||
if __name__ == '__main__':
    # Show the configuration dialog standalone, for testing
    configure(GitHubAI.name)
@ -8,6 +8,7 @@ from qt.core import QCheckBox, QComboBox, QFormLayout, QLabel, QLineEdit, QWidge
|
||||
|
||||
from calibre.ai.google import GoogleAI
|
||||
from calibre.ai.prefs import decode_secret, encode_secret, pref_for_provider, set_prefs_for_provider
|
||||
from calibre.ai.utils import configure
|
||||
from calibre.gui2 import error_dialog
|
||||
|
||||
pref = partial(pref_for_provider, GoogleAI.name)
|
||||
@ -100,3 +101,7 @@ class ConfigWidget(QWidget):
|
||||
|
||||
def save_settings(self):
|
||||
set_prefs_for_provider(GoogleAI.name, self.settings)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
configure(GoogleAI.name)
|
||||
|
||||
@ -36,9 +36,10 @@ from qt.core import (
|
||||
from calibre.ai import AICapabilities
|
||||
from calibre.ai.open_router import OpenRouterAI
|
||||
from calibre.ai.prefs import decode_secret, encode_secret, pref_for_provider, set_prefs_for_provider
|
||||
from calibre.ai.utils import configure
|
||||
from calibre.customize.ui import available_ai_provider_plugins
|
||||
from calibre.ebooks.txt.processor import create_markdown_object
|
||||
from calibre.gui2 import Application, error_dialog, gprefs, safe_open_url
|
||||
from calibre.gui2 import error_dialog, gprefs, safe_open_url
|
||||
from calibre.gui2.widgets2 import Dialog
|
||||
from calibre.utils.date import qt_from_dt
|
||||
from calibre.utils.icu import primary_sort_key
|
||||
@ -478,7 +479,4 @@ class ConfigWidget(QWidget):
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
app = Application([])
|
||||
d = ChooseModel()
|
||||
d.exec()
|
||||
print(d.model_id)
|
||||
configure(OpenRouterAI.name)
|
||||
|
||||
@ -19,6 +19,7 @@ from urllib.request import ProxyHandler, Request, build_opener
|
||||
from calibre import get_proxies
|
||||
from calibre.ai import ChatMessage, ChatMessageType, ChatResponse, Citation, WebLink
|
||||
from calibre.constants import __version__
|
||||
from calibre.customize.ui import available_ai_provider_plugins
|
||||
|
||||
|
||||
def atomic_write(path, data):
|
||||
@ -285,6 +286,35 @@ def develop_text_chat(
|
||||
pprint(msg)
|
||||
|
||||
|
||||
def configure(plugin_name: str, parent: Any = None) -> None:
    """Show a modal configuration dialog for the named AI provider plugin.

    Looks up the plugin by name among available AI provider plugins, embeds
    its config widget in a dialog with OK/Cancel buttons, and saves the
    settings if the dialog is accepted.
    Raises KeyError if no plugin with that name is available.
    """
    from qt.core import QDialog, QDialogButtonBox, QVBoxLayout

    from calibre.gui2 import ensure_app
    ensure_app(headless=False)
    for plugin in available_ai_provider_plugins():
        if plugin.name == plugin_name:
            cw = plugin.config_widget()
            break
    else:
        raise KeyError(f'No plugin named: {plugin_name}')
    class D(QDialog):
        def accept(self):
            # Keep the dialog open while the widget reports invalid settings
            if not cw.validate():
                return
            super().accept()

    d = D(parent=parent)
    l = QVBoxLayout(d)
    l.addWidget(cw)
    bb = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel)
    bb.accepted.connect(d.accept)
    bb.rejected.connect(d.reject)
    l.addWidget(bb)
    d.resize(d.sizeHint())
    if d.exec() == QDialog.DialogCode.Accepted:
        plugin.save_settings(cw)
|
||||
|
||||
def find_tests() -> None:
|
||||
import unittest
|
||||
class TestAIUtils(unittest.TestCase):
|
||||
|
||||
@ -4,6 +4,7 @@ __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
|
||||
import glob
|
||||
import os
|
||||
|
||||
from calibre.ai.github import GitHubAI
|
||||
from calibre.ai.google import GoogleAI
|
||||
from calibre.ai.open_router import OpenRouterAI
|
||||
from calibre.constants import numeric_version
|
||||
@ -1978,7 +1979,7 @@ plugins += [
|
||||
|
||||
# }}}
|
||||
|
||||
plugins.extend((OpenRouterAI, GoogleAI))
|
||||
plugins.extend((OpenRouterAI, GoogleAI, GitHubAI))
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Test load speed
|
||||
|
||||
@ -1621,6 +1621,10 @@ def ensure_app(headless=True):
|
||||
with _ea_lock:
|
||||
if _store_app is None and QApplication.instance() is None:
|
||||
args = sys.argv[:1]
|
||||
if not headless:
|
||||
_store_app = Application([])
|
||||
sys.excepthook = simple_excepthook
|
||||
return
|
||||
has_headless = ismacos or islinux or isbsd
|
||||
if headless and has_headless:
|
||||
args += ['-platformpluginpath', plugins_loc, '-platform', os.environ.get('CALIBRE_HEADLESS_PLATFORM', 'headless')]
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user