Kolenka 2012-06-04 08:36:01 -07:00
commit a46213b696
4 changed files with 188 additions and 214 deletions

View File

@@ -1,152 +1,111 @@
-#!/usr/bin/env python
-
-__license__ = 'GPL v3'
-__author__ = 'Kovid Goyal and Sujata Raman, Lorenzo Vigentini'
-__copyright__ = '2009, Kovid Goyal and Sujata Raman'
-__version__ = 'v1.02'
-__date__ = '10, January 2010'
-__description__ = 'Providing context and clarity on national and international news, peoples and cultures'
-
-'''csmonitor.com'''
-
-import re
-from calibre.web.feeds.news import BasicNewsRecipe
-from calibre.ebooks.BeautifulSoup import BeautifulSoup
-
-class ChristianScienceMonitor(BasicNewsRecipe):
-
-    __author__ = 'Kovid Goyal'
-    description = 'Providing context and clarity on national and international news, peoples and cultures'
-    cover_url = 'http://www.csmonitor.com/extension/csm_base/design/csm_design/images/csmlogo_179x46.gif'
-    title = 'Christian Science Monitor'
-    publisher = 'The Christian Science Monitor'
-    category = 'News, politics, culture, economy, general interest'
-    language = 'en'
-    encoding = 'utf-8'
-    timefmt = '[%a, %d %b, %Y]'
-    oldest_article = 16
-    max_articles_per_feed = 20
-    use_embedded_content = False
-    recursion = 10
-    remove_javascript = True
-    no_stylesheets = True
-    requires_version = (0, 8, 39)
-
-    def preprocess_raw_html(self, raw, url):
-        try:
-            from html5lib import parse
-            root = parse(raw, namespaceHTMLElements=False,
-                    treebuilder='lxml').getroot()
-            from lxml import etree
-            for tag in root.xpath(
-                '//script|//style|//noscript|//meta|//link|//object'):
-                tag.getparent().remove(tag)
-            for elem in list(root.iterdescendants(tag=etree.Comment)):
-                elem.getparent().remove(elem)
-            ans = etree.tostring(root, encoding=unicode)
-            ans = re.sub('.*<html', '<html', ans, flags=re.DOTALL)
-            return ans
-        except:
-            import traceback
-            traceback.print_exc()
-            raise
-
-    def index_to_soup(self, url):
-        raw = BasicNewsRecipe.index_to_soup(self, url,
-                raw=True).decode('utf-8')
-        raw = self.preprocess_raw_html(raw, url)
-        return BasicNewsRecipe.index_to_soup(self, raw)
-
-    def append_page(self, soup, appendtag, position):
-        nav = soup.find('div', attrs={'class':'navigation'})
-        if nav:
-            pager = nav.findAll('a')
-            for part in pager:
-                if 'Next' in part:
-                    nexturl = ('http://www.csmonitor.com' +
-                            re.findall(r'href="(.*?)"', str(part))[0])
-                    soup2 = self.index_to_soup(nexturl)
-                    texttag = soup2.find('div',
-                            attrs={'class': re.compile('list-article-.*')})
-                    trash_c = soup2.findAll(attrs={'class': 'list-description'})
-                    trash_h = soup2.h1
-                    for tc in trash_c: tc.extract()
-                    trash_h.extract()
-
-                    newpos = len(texttag.contents)
-                    self.append_page(soup2, texttag, newpos)
-                    texttag.extract()
-                    appendtag.insert(position, texttag)
-
-    def preprocess_html(self, soup):
-        PRINT_RE = re.compile(r'/layout/set/print/content/view/print/[0-9]*')
-        html = str(soup)
-        try:
-            print_found = PRINT_RE.findall(html)
-        except Exception:
-            pass
-        if print_found:
-            print_url = 'http://www.csmonitor.com' + print_found[0]
-            print_soup = self.index_to_soup(print_url)
-        else:
-            self.append_page(soup, soup.body, 3)
-
-            trash_a = soup.findAll(attrs={'class': re.compile('navigation.*')})
-            trash_b = soup.findAll(attrs={'style': re.compile('.*')})
-            trash_d = soup.findAll(attrs={'class': 'sByline'})
-            for ta in trash_a: ta.extract()
-            for tb in trash_b: tb.extract()
-            for td in trash_d: td.extract()
-
-            print_soup = soup
-        return print_soup
-
-    extra_css = '''
-        h1{ color:#000000;font-family: Georgia,Times,"Times New Roman",serif; font-size: large}
-        .sub{ color:#000000;font-family: Georgia,Times,"Times New Roman",serif; font-size: small;}
-        .byline{ font-family:Arial,Helvetica,sans-serif ; color:#999999; font-size: x-small;}
-        .postdate{color:#999999 ; font-family:Arial,Helvetica,sans-serif ; font-size: x-small; }
-        h3{color:#999999 ; font-family:Arial,Helvetica,sans-serif ; font-size: x-small; }
-        .photoCutline{ color:#333333 ; font-family:Arial,Helvetica,sans-serif ; font-size: x-small; }
-        .photoCredit{ color:#999999 ; font-family:Arial,Helvetica,sans-serif ; font-size: x-small; }
-        #story{font-family:Arial,Tahoma,Verdana,Helvetica,sans-serif ; font-size: small; }
-        #main{font-family:Arial,Tahoma,Verdana,Helvetica,sans-serif ; font-size: small; }
-        #photo-details{ font-family:Arial,Helvetica,sans-serif ; color:#999999; font-size: x-small;}
-        span.name{color:#205B87;font-family: Georgia,Times,"Times New Roman",serif; font-size: x-small}
-        p#dateline{color:#444444 ; font-family:Arial,Helvetica,sans-serif ; font-style:italic;} '''
-
-    feeds = [(u'Top Stories', u'http://rss.csmonitor.com/feeds/top'),
-             (u'World'      , u'http://rss.csmonitor.com/feeds/world'),
-             (u'USA'        , u'http://rss.csmonitor.com/feeds/usa'),
-             (u'Commentary' , u'http://rss.csmonitor.com/feeds/commentary'),
-             (u'Money'      , u'http://rss.csmonitor.com/feeds/wam'),
-             (u'Learning'   , u'http://rss.csmonitor.com/feeds/learning'),
-             (u'Living'     , u'http://rss.csmonitor.com/feeds/living'),
-             (u'Innovation' , u'http://rss.csmonitor.com/feeds/scitech'),
-             (u'Gardening'  , u'http://rss.csmonitor.com/feeds/gardening'),
-             (u'Environment', u'http://rss.csmonitor.com/feeds/environment'),
-             (u'Arts'       , u'http://rss.csmonitor.com/feeds/arts'),
-             (u'Books'      , u'http://rss.csmonitor.com/feeds/books'),
-             (u'Home Forum' , u'http://rss.csmonitor.com/feeds/homeforum')
-            ]
-
-    keep_only_tags = [dict(name='div', attrs={'id':'mainColumn'}), ]
-
-    remove_tags = [
-        dict(name='div', attrs={'id':['story-tools','videoPlayer','storyRelatedBottom','enlarge-photo','photo-paginate']}),
-        dict(name=['div','a'], attrs={'class':
-            ['storyToolbar cfx','podStoryRel','spacer3',
-             'divvy spacer7','comment','storyIncludeBottom',
-             'hide', 'podBrdr']}),
-        dict(name='ul', attrs={'class':['centerliststories']}),
-        dict(name='form', attrs={'id':['commentform']}),
-        dict(name='div', attrs={'class':['ui-comments']})
-    ]
-
-    remove_tags_after = [dict(name='div', attrs={'class':['ad csmAd']}),
-        dict(name='div', attrs={'class': [re.compile('navigation.*')]}),
-        dict(name='div', attrs={'style': [re.compile('.*')]})
-    ]
+__license__ = 'GPL v3'
+__copyright__ = '2012, Darko Miletic <darko.miletic at gmail.com>'
+'''
+www.csmonitor.com
+'''
+
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class CSMonitor(BasicNewsRecipe):
+    title = 'The Christian Science Monitor - daily'
+    __author__ = 'Darko Miletic'
+    description = 'The Christian Science Monitor is an international news organization that delivers thoughtful, global coverage via its website, weekly magazine, daily news briefing, and email newsletters.'
+    publisher = 'The Christian Science Monitor'
+    category = 'news, politics, USA'
+    oldest_article = 2
+    max_articles_per_feed = 200
+    no_stylesheets = True
+    encoding = 'utf8'
+    use_embedded_content = False
+    language = 'en'
+    remove_empty_feeds = True
+    publication_type = 'newspaper'
+    masthead_url = 'http://www.csmonitor.com/extension/csm_base/design/csm_design/images/csmlogo_179x46.gif'
+    extra_css = """
+        body{font-family: Arial,Tahoma,Verdana,Helvetica,sans-serif }
+        img{margin-bottom: 0.4em; display:block}
+        .head {font-family: Georgia,"Times New Roman",Times,serif}
+        .sByline,.caption{font-size: x-small}
+        .hide{display: none}
+        .sLoc{font-weight: bold}
+        ul{list-style-type: none}
+    """
+
+    conversion_options = {
+        'comment'   : description
+      , 'tags'      : category
+      , 'publisher' : publisher
+      , 'language'  : language
+    }
+
+    remove_tags = [
+        dict(name=['meta','link','iframe','object','embed'])
+       ,dict(attrs={'class':['podStoryRel','bottom-rel','hide']})
+       ,dict(attrs={'id':['pgallerycarousel_enlarge','pgallerycarousel_related']})
+    ]
+    keep_only_tags = [
+        dict(name='h1', attrs={'class':'head'})
+       ,dict(name='h2', attrs={'class':'subhead'})
+       ,dict(attrs={'class':['sByline','podStoryGal','ui-body-header','sBody']})
+    ]
+    remove_attributes = ['xmlns:fb']
+
+    feeds = [
+        (u'USA'           , u'http://rss.csmonitor.com/feeds/usa'        )
+       ,(u'World'         , u'http://rss.csmonitor.com/feeds/world'      )
+       ,(u'Politics'      , u'http://rss.csmonitor.com/feeds/politics'   )
+       ,(u'Business'      , u'http://rss.csmonitor.com/feeds/wam'        )
+       ,(u'Commentary'    , u'http://rss.csmonitor.com/feeds/commentary' )
+       ,(u'Books'         , u'http://rss.csmonitor.com/feeds/books'      )
+       ,(u'Arts'          , u'http://rss.csmonitor.com/feeds/arts'       )
+       ,(u'Environment'   , u'http://rss.csmonitor.com/feeds/environment')
+       ,(u'Innovation'    , u'http://rss.csmonitor.com/feeds/scitech'    )
+       ,(u'Living'        , u'http://rss.csmonitor.com/feeds/living'     )
+       ,(u'Science'       , u'http://rss.csmonitor.com/feeds/science'    )
+       ,(u'The Culture'   , u'http://rss.csmonitor.com/feeds/theculture' )
+       ,(u'The Home Forum', u'http://rss.csmonitor.com/feeds/homeforum'  )
+       ,(u'Articles'      , u'http://rss.csmonitor.com/feeds/csarticles' )
+    ]
+
+    def append_page(self, soup):
+        pager = soup.find('div', attrs={'class':'navigation'})
+        if pager:
+            nexttag = pager.find(attrs={'id':'next-button'})
+            if nexttag:
+                nurl = 'http://www.csmonitor.com' + nexttag['href']
+                soup2 = self.index_to_soup(nurl)
+                texttag = soup2.find(attrs={'class':'sBody'})
+                if texttag:
+                    appendtag = soup.find(attrs={'class':'sBody'})
+                    for citem in texttag.findAll(attrs={'class':['podStoryRel','bottom-rel','hide']}):
+                        citem.extract()
+                    self.append_page(soup2)
+                    texttag.extract()
+                    pager.extract()
+                    appendtag.append(texttag)
+
+    def preprocess_html(self, soup):
+        self.append_page(soup)
+        pager = soup.find('div', attrs={'class':'navigation'})
+        if pager:
+            pager.extract()
+        for item in soup.findAll('a'):
+            limg = item.find('img')
+            if item.string is not None:
+                str = item.string
+                item.replaceWith(str)
+            else:
+                if limg:
+                    item.name = 'div'
+                    item.attrs = []
+                else:
+                    str = self.tag_to_string(item)
+                    item.replaceWith(str)
+        for item in soup.findAll('img'):
+            if 'scorecardresearch' in item['src']:
+                item.extract()
+            else:
+                if not item.has_key('alt'):
+                    item['alt'] = 'image'
+        return soup
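
The rewrite drops the old print-page detection and instead paginates with a recursive append_page that follows the pager's next button and splices each continuation's sBody into the first page. Below is a minimal sketch of that splice outside calibre, using BeautifulSoup directly; the markup is invented to mirror the div.sBody / div.navigation structure the recipe targets, and fetch() stands in for index_to_soup().

    # Standalone sketch of the append_page splice; PAGE1/PAGE2 and fetch()
    # are invented stand-ins for the real site and calibre's downloader.
    from bs4 import BeautifulSoup

    PAGE1 = ('<div class="sBody"><p>Part one.</p></div>'
             '<div class="navigation"><a id="next-button" href="/page/2">Next</a></div>')
    PAGE2 = '<div class="sBody"><p>Part two.</p></div><div class="navigation"></div>'

    def fetch(url):
        # A real recipe would download url here.
        return BeautifulSoup(PAGE2, 'html.parser')

    def append_page(soup):
        pager = soup.find('div', attrs={'class': 'navigation'})
        nexttag = pager.find(attrs={'id': 'next-button'}) if pager else None
        if nexttag is None:
            return
        soup2 = fetch(nexttag['href'])
        texttag = soup2.find(attrs={'class': 'sBody'})
        if texttag is None:
            return
        append_page(soup2)   # recurse first, so deeper pages splice bottom-up
        texttag.extract()    # detach the continuation body
        pager.extract()      # drop this page's pager
        soup.find(attrs={'class': 'sBody'}).append(texttag)

    soup = BeautifulSoup(PAGE1, 'html.parser')
    append_page(soup)
    print(soup.find(attrs={'class': 'sBody'}).get_text(' ', strip=True))
    # Part one. Part two.

Recursing before extracting means the deepest page is merged first, so the final document keeps reading order no matter how many continuation pages there are.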

View File

@@ -1,5 +1,6 @@
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
 __license__ = 'GPL v3'
-__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
+__copyright__ = '2010-2012, Darko Miletic <darko.miletic at gmail.com>'
 '''
 www.elpais.com
 '''
@@ -7,23 +8,24 @@ www.elpais.com
 from calibre.web.feeds.news import BasicNewsRecipe

 class ElPais_RSS(BasicNewsRecipe):
-    title = 'El Pais'
+    title = u'El País'
     __author__ = 'Darko Miletic'
-    description = 'el periodico global en Castellano'
+    description = u'Noticias de última hora sobre la actualidad en España y el mundo: política, economía, deportes, cultura, sociedad, tecnología, gente, opinión, viajes, moda, televisión, los blogs y las firmas de EL PAÍS. Además especiales, vídeos, fotos, audios, gráficos, entrevistas, promociones y todos los servicios de EL PAÍS.'
     publisher = 'EDICIONES EL PAIS, S.L.'
     category = 'news, politics, finances, world, spain'
     oldest_article = 2
     max_articles_per_feed = 200
     no_stylesheets = True
-    encoding = 'cp1252'
+    encoding = 'utf8'
     use_embedded_content = False
     language = 'es'
     remove_empty_feeds = True
     publication_type = 'newspaper'
-    masthead_url = 'http://www.elpais.com/im/tit_logo.gif'
+    masthead_url = 'http://ep01.epimg.net/iconos/v1.x/v1.0/logos/cabecera_portada.png'
     extra_css = """
-        body{font-family: Georgia,"Times New Roman",Times,serif }
-        h3{font-family: Arial,Helvetica,sans-serif}
+        h1{font-family: Georgia,"Times New Roman",Times,serif }
+        #subtitulo_noticia, .firma, .figcaption{font-size: small}
+        body{font-family: Arial,Helvetica,Garuda,sans-serif}
         img{margin-bottom: 0.4em; display:block}
     """
@@ -34,49 +36,61 @@ class ElPais_RSS(BasicNewsRecipe):
      , 'language' : language
     }

-    keep_only_tags = [dict(attrs={'class':['cabecera_noticia estirar','cabecera_noticia','','contenido_noticia']})]
-    remove_tags = [
-        dict(name=['meta','link','base','iframe','embed','object'])
-       ,dict(attrs={'class':['info_complementa','estructura_2col_der','votos estirar','votos']})
-       ,dict(attrs={'id':'utilidades'})
-    ]
-    remove_tags_after = dict(attrs={'id':'utilidades'})
-    remove_attributes = ['lang','border','width','height']
+    keep_only_tags = [
+        dict(attrs={'id':['titulo_noticia','subtitulo_noticia']})
+       ,dict(attrs={'class':['firma','columna_texto','entrevista_p_r']})
+    ]
+    remove_tags = [
+        dict(name=['meta','link','base','iframe','embed','object'])
+       ,dict(attrs={'class':'disposicion_vertical'})
+    ]

     feeds = [
-        (u'Lo ultimo'             , u'http://www.elpais.com/rss/feed.html?feedId=17046')
-       ,(u'America Latina'        , u'http://www.elpais.com/rss/feed.html?feedId=17041')
-       ,(u'Mexico'                , u'http://www.elpais.com/rss/feed.html?feedId=17042')
-       ,(u'Europa'                , u'http://www.elpais.com/rss/feed.html?feedId=17043')
-       ,(u'Estados Unidos'        , u'http://www.elpais.com/rss/feed.html?feedId=17044')
-       ,(u'Oriente proximo'       , u'http://www.elpais.com/rss/feed.html?feedId=17045')
-       ,(u'Espana'                , u'http://www.elpais.com/rss/feed.html?feedId=1002' )
-       ,(u'Andalucia'             , u'http://www.elpais.com/rss/feed.html?feedId=17057')
-       ,(u'Catalunia'             , u'http://www.elpais.com/rss/feed.html?feedId=17059')
-       ,(u'Comunidad Valenciana'  , u'http://www.elpais.com/rss/feed.html?feedId=17061')
-       ,(u'Madrid'                , u'http://www.elpais.com/rss/feed.html?feedId=1016' )
-       ,(u'Pais Vasco'            , u'http://www.elpais.com/rss/feed.html?feedId=17062')
-       ,(u'Galicia'               , u'http://www.elpais.com/rss/feed.html?feedId=17063')
-       ,(u'Opinion'               , u'http://www.elpais.com/rss/feed.html?feedId=1003' )
-       ,(u'Sociedad'              , u'http://www.elpais.com/rss/feed.html?feedId=1004' )
-       ,(u'Deportes'              , u'http://www.elpais.com/rss/feed.html?feedId=1007' )
-       ,(u'Cultura'               , u'http://www.elpais.com/rss/feed.html?feedId=1008' )
-       ,(u'Cine'                  , u'http://www.elpais.com/rss/feed.html?feedId=17052')
-       ,(u'Literatura'            , u'http://www.elpais.com/rss/feed.html?feedId=17053')
-       ,(u'Musica'                , u'http://www.elpais.com/rss/feed.html?feedId=17051')
-       ,(u'Arte'                  , u'http://www.elpais.com/rss/feed.html?feedId=17060')
-       ,(u'Tecnologia'            , u'http://www.elpais.com/rss/feed.html?feedId=1005' )
-       ,(u'Economia'              , u'http://www.elpais.com/rss/feed.html?feedId=1006' )
-       ,(u'Ciencia'               , u'http://www.elpais.com/rss/feed.html?feedId=17068')
-       ,(u'Salud'                 , u'http://www.elpais.com/rss/feed.html?feedId=17074')
-       ,(u'Ocio'                  , u'http://www.elpais.com/rss/feed.html?feedId=17075')
-       ,(u'Justicia y Leyes'      , u'http://www.elpais.com/rss/feed.html?feedId=17069')
-       ,(u'Guerras y conflictos'  , u'http://www.elpais.com/rss/feed.html?feedId=17070')
-       ,(u'Politica'              , u'http://www.elpais.com/rss/feed.html?feedId=17073')
+        (u'Lo ultimo'             , u'http://ep00.epimg.net/rss/tags/ultimas_noticias.xml')
+       ,(u'America Latina'        , u'http://elpais.com/tag/rss/latinoamerica/a/'         )
+       ,(u'Mexico'                , u'http://elpais.com/tag/rss/mexico/a/'                )
+       ,(u'Europa'                , u'http://elpais.com/tag/rss/europa/a/'                )
+       ,(u'Estados Unidos'        , u'http://elpais.com/tag/rss/estados_unidos/a/'        )
+       ,(u'Oriente proximo'       , u'http://elpais.com/tag/rss/oriente_proximo/a/'       )
+       ,(u'Andalucia'             , u'http://ep00.epimg.net/rss/ccaa/andalucia.xml'       )
+       ,(u'Catalunia'             , u'http://ep00.epimg.net/rss/ccaa/catalunya.xml'       )
+       ,(u'Comunidad Valenciana'  , u'http://ep00.epimg.net/rss/ccaa/valencia.xml'        )
+       ,(u'Madrid'                , u'http://ep00.epimg.net/rss/ccaa/madrid.xml'          )
+       ,(u'Pais Vasco'            , u'http://ep00.epimg.net/rss/ccaa/paisvasco.xml'       )
+       ,(u'Galicia'               , u'http://ep00.epimg.net/rss/ccaa/galicia.xml'         )
+       ,(u'Sociedad'              , u'http://ep00.epimg.net/rss/sociedad/portada.xml'     )
+       ,(u'Deportes'              , u'http://ep00.epimg.net/rss/deportes/portada.xml'     )
+       ,(u'Cultura'               , u'http://ep00.epimg.net/rss/cultura/portada.xml'      )
+       ,(u'Cine'                  , u'http://elpais.com/tag/rss/cine/a/'                  )
+       ,(u'Economía'              , u'http://elpais.com/tag/rss/economia/a/'              )
+       ,(u'Literatura'            , u'http://elpais.com/tag/rss/libros/a/'                )
+       ,(u'Musica'                , u'http://elpais.com/tag/rss/musica/a/'                )
+       ,(u'Arte'                  , u'http://elpais.com/tag/rss/arte/a/'                  )
+       ,(u'Medio Ambiente'        , u'http://elpais.com/tag/rss/medio_ambiente/a/'        )
+       ,(u'Tecnologia'            , u'http://ep01.epimg.net/rss/tecnologia/portada.xml'   )
+       ,(u'Ciencia'               , u'http://ep00.epimg.net/rss/tags/c_ciencia.xml'       )
+       ,(u'Salud'                 , u'http://elpais.com/tag/rss/salud/a/'                 )
+       ,(u'Ocio'                  , u'http://elpais.com/tag/rss/ocio/a/'                  )
+       ,(u'Justicia y Leyes'      , u'http://elpais.com/tag/rss/justicia/a/'              )
+       ,(u'Guerras y conflictos'  , u'http://elpais.com/tag/rss/conflictos/a/'            )
+       ,(u'Politica'              , u'http://ep00.epimg.net/rss/politica/portada.xml'     )
+       ,(u'Opinion'               , u'http://ep01.epimg.net/rss/politica/opinion.xml'     )
     ]

-    def print_version(self, url):
-        return url + '?print=1'
+    def get_article_url(self, article):
+        url = BasicNewsRecipe.get_article_url(self, article)
+        if url and (not('/album/' in url) and not('/futbol/partido/' in url)):
+            return url
+        self.log('Skipping non-article', url)
+        return None
+
+    def get_cover_url(self):
+        soup = self.index_to_soup('http://elpais.com/')
+        for image in soup.findAll('img'):
+            if image['src'].endswith('elpaisTodayMiddle.jpg'):
+                sstr = image['src']
+                return sstr.replace('elpaisTodayMiddle.jpg', 'elpaisToday.jpg')
+        return None

     def preprocess_html(self, soup):
         for item in soup.findAll(style=True):
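
Alongside the feed migration from numeric feedId URLs to the epimg.net and /tag/rss/ endpoints, print_version gives way to a get_article_url filter that skips photo albums and live football matches before download. The filter reduces to a URL predicate; a small sketch with invented sample URLs:

    # The non-article filter from get_article_url, as a bare predicate.
    # Sample URLs are made up for illustration.
    def is_article(url):
        return bool(url) and '/album/' not in url and '/futbol/partido/' not in url

    samples = [
        'http://elpais.com/internacional/2012/06/04/actualidad/article.html',
        'http://elpais.com/album/fotos.html',              # photo gallery -> skip
        'http://elpais.com/deportes/futbol/partido/123/',  # live match   -> skip
    ]
    for url in samples:
        print('keep' if is_article(url) else 'skip', url)

The new get_cover_url works by similar string surgery: it scans the homepage for the thumbnail ending in elpaisTodayMiddle.jpg and rewrites the filename to elpaisToday.jpg to get the full-size front page.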

View File

@@ -273,37 +273,37 @@ class PRST1(USBMS):
         self.update_device_collections(connection, booklist, collections, source_id, dbpath)

         debug_print('PRST1: finished update_device_database')

     def get_database_min_id(self, source_id):
         sequence_min = 0L
         if source_id == '1':
             sequence_min = 4294967296L

         return sequence_min

     def set_database_sequence_id(self, connection, table, sequence_id):
         cursor = connection.cursor()

         # Update the sequence Id if it exists
         query = 'UPDATE sqlite_sequence SET seq = ? WHERE name = ?'
         t = (sequence_id, table,)
         cursor.execute(query, t)

         # Insert the sequence Id if it doesn't
         query = ('INSERT INTO sqlite_sequence (name, seq) '
                  'SELECT ?, ? '
                  'WHERE NOT EXISTS (SELECT 1 FROM sqlite_sequence WHERE name = ?)');
         cursor.execute(query, (table, sequence_id, table,))

         cursor.close()

     def read_device_books(self, connection, source_id, dbpath):
         from sqlite3 import DatabaseError

         sequence_min = self.get_database_min_id(source_id)
         sequence_max = sequence_min
         sequence_dirty = 0

         try:
             cursor = connection.cursor()
@@ -340,12 +340,12 @@ class PRST1(USBMS):
                     # Record the new Id and write it to the DB
                     db_books[book] = sequence_max
                     sequence_max = sequence_max + 1

                     # Fix the Books DB
                     query = 'UPDATE books SET _id = ? WHERE file_path = ?'
                     t = (db_books[book], book,)
                     cursor.execute(query, t)

                     # Fix any references so that they point back to the right book
                     t = (db_books[book], bookId,)
                     query = 'UPDATE collections SET content_id = ? WHERE content_id = ?'
@@ -368,7 +368,7 @@ class PRST1(USBMS):
                     cursor.execute(query, t)
                     query = 'UPDATE preference SET content_id = ? WHERE content_id = ?'
                     cursor.execute(query, t)

             self.set_database_sequence_id(connection, 'books', sequence_max)
             cursor.close()
@@ -383,7 +383,7 @@ class PRST1(USBMS):
         db_books = self.read_device_books(connection, source_id, dbpath)
         cursor = connection.cursor()

         for book in booklist:
             # Run through plugboard if needed
             if plugboard is not None:
@@ -464,11 +464,11 @@ class PRST1(USBMS):
     def read_device_collections(self, connection, source_id, dbpath):
         from sqlite3 import DatabaseError

         sequence_min = self.get_database_min_id(source_id)
         sequence_max = sequence_min
         sequence_dirty = 0

         try:
             cursor = connection.cursor()
@@ -492,7 +492,7 @@ class PRST1(USBMS):
                 if row[0] < sequence_min:
                     sequence_dirty = 1
                 else:
                     sequence_max = max(sequence_max, row[0])

             # If the database is 'dirty', then we should fix up the Ids and the sequence number
             if sequence_dirty == 1:
@@ -502,26 +502,26 @@ class PRST1(USBMS):
                     # Record the new Id and write it to the DB
                     db_collections[collection] = sequence_max
                     sequence_max = sequence_max + 1

                     # Fix the collection DB
                     query = 'UPDATE collection SET _id = ? WHERE title = ?'
                     t = (db_collections[collection], collection, )
                     cursor.execute(query, t)

                     # Fix any references in existing collections
                     query = 'UPDATE collections SET collection_id = ? WHERE collection_id = ?'
                     t = (db_collections[collection], collectionId,)
                     cursor.execute(query, t)

             self.set_database_sequence_id(connection, 'collection', sequence_max)

             # Fix up the collections table now...
             sequence_dirty = 0
             sequence_max = sequence_min

             query = 'SELECT _id FROM collections'
             cursor.execute(query)

             db_collection_pairs = []
             for i, row in enumerate(cursor):
                 db_collection_pairs.append(row[0])
@@ -539,12 +539,12 @@ class PRST1(USBMS):
                 t = (sequence_max, pairId,)
                 cursor.execute(query, t)
                 sequence_max = sequence_max + 1

             self.set_database_sequence_id(connection, 'collections', sequence_max)
             cursor.close()

         return db_collections

     def update_device_collections(self, connection, booklist, collections,
             source_id, dbpath):
         cursor = connection.cursor()
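
The set_database_sequence_id calls visible above rely on a portable SQLite upsert: an UPDATE first, then an INSERT ... SELECT ... WHERE NOT EXISTS that fires only when the UPDATE matched no row. A runnable sketch of the same pattern against an in-memory stand-in table (sqlite_sequence itself is reserved, so it cannot be created directly; seq_table replaces it here):

    # Update-then-insert-if-missing upsert, as used by the PRST1 driver.
    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE seq_table (name TEXT, seq INTEGER)')

    def set_sequence_id(conn, table, sequence_id):
        cursor = conn.cursor()
        # Update the sequence id if the row exists...
        cursor.execute('UPDATE seq_table SET seq = ? WHERE name = ?',
                       (sequence_id, table))
        # ...and insert it only when the UPDATE matched nothing.
        cursor.execute('INSERT INTO seq_table (name, seq) '
                       'SELECT ?, ? '
                       'WHERE NOT EXISTS (SELECT 1 FROM seq_table WHERE name = ?)',
                       (table, sequence_id, table))
        cursor.close()

    set_sequence_id(conn, 'books', 4294967296)  # first call takes the INSERT path
    set_sequence_id(conn, 'books', 4294967300)  # second call takes the UPDATE path
    print(conn.execute('SELECT name, seq FROM seq_table').fetchall())
    # [('books', 4294967300)]

Unlike INSERT OR REPLACE, this two-step form never deletes and recreates the row, which matters when other tables reference it.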

View File

@@ -382,7 +382,8 @@ class USBMS(CLI, Device):
             os.makedirs(self.normalize_path(self._main_prefix))

         def write_prefix(prefix, listid):
-            if prefix is not None and isinstance(booklists[listid], self.booklist_class):
+            if (prefix is not None and len(booklists) > listid and
+                    isinstance(booklists[listid], self.booklist_class)):
                 if not os.path.exists(prefix):
                     os.makedirs(self.normalize_path(prefix))
                 with open(self.normalize_path(os.path.join(prefix, self.METADATA_CACHE)), 'wb') as f:
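
The one functional change here: write_prefix previously indexed booklists[listid] unguarded, so a device exposing a storage prefix without a corresponding booklist raised IndexError. The new condition checks the list length before indexing, and Python's short-circuit evaluation makes the order safe. A sketch of the guard with stand-in names (BookList replaces self.booklist_class; the paths are invented):

    # Length check runs before the index, so a missing card list is harmless.
    class BookList(list):
        pass

    def should_write(prefix, booklists, listid):
        return (prefix is not None and len(booklists) > listid and
                isinstance(booklists[listid], BookList))

    booklists = [BookList()]                             # main memory only, no card
    print(should_write('/media/READER', booklists, 0))   # True
    print(should_write('/media/CARD', booklists, 1))     # False, not IndexError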