Merge from trunk

Charles Haley 2012-01-10 11:49:29 +01:00
commit 014d9300a8
19 changed files with 175 additions and 32 deletions


@@ -0,0 +1,11 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1324913694(BasicNewsRecipe):
    title = u'Derin Dusunce'
    language = 'tr'
    __author__ = 'asalet_r'
    oldest_article = 7
    max_articles_per_feed = 20
    auto_cleanup = True
    feeds = [(u'Derin D\xfc\u015f\xfcnce', u'http://www.derindusunce.org/feed/')]


@@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1324736687(BasicNewsRecipe):
    title = u'D\xfcnya Bizim'
    language = 'tr'
    __author__ = 'asalet_r'
    oldest_article = 7
    max_articles_per_feed = 10
    auto_cleanup = True
    feeds = [(u'Aktif \u0130mamlar', u'http://dunyabizim.com/servisler/rss.php?kategoriID=31'), (u'Ayr\u0131nt\u0131 Defteri', u'http://dunyabizim.com/servisler/rss.php?kategoriID=58'), (u'Baba Kitaplar', u'http://dunyabizim.com/servisler/rss.php?kategoriID=4'), (u'Bu da Oldu', u'http://dunyabizim.com/servisler/rss.php?kategoriID=32'), (u'\xc7-al\u0131nt\u0131 Yaz\u0131lar', u'http://dunyabizim.com/servisler/rss.php?kategoriID=33'), (u'Dar\xfclmedya', u'http://dunyabizim.com/servisler/rss.php?kategoriID=49'), (u'Gidenler', u'http://dunyabizim.com/servisler/rss.php?kategoriID=59'), (u'G\xfczel Mekanlar', u'http://dunyabizim.com/servisler/rss.php?kategoriID=43'), (u'\u0130yi Haberler', u'http://dunyabizim.com/servisler/rss.php?kategoriID=18'), (u'\u0130yi M\xfczikler', u'http://dunyabizim.com/servisler/rss.php?kategoriID=2'), (u'Kalite Dergiler', u'http://dunyabizim.com/servisler/rss.php?kategoriID=3'), (u'Konu\u015fa Konu\u015fa', u'http://dunyabizim.com/servisler/rss.php?kategoriID=24'), (u'M\xfcstesta G\xfczeller', u'http://dunyabizim.com/servisler/rss.php?kategoriID=65'), (u'O \u015eimdi Nerede?', u'http://dunyabizim.com/servisler/rss.php?kategoriID=52'), (u'Olsa Ke\u015fke', u'http://dunyabizim.com/servisler/rss.php?kategoriID=34'), (u'Orada Ne Oldu?', u'http://dunyabizim.com/servisler/rss.php?kategoriID=38'), (u'\xd6nemli Adamlar', u'http://dunyabizim.com/servisler/rss.php?kategoriID=1'), (u'Polemik', u'http://dunyabizim.com/servisler/rss.php?kategoriID=39'), (u'Sinema', u'http://dunyabizim.com/servisler/rss.php?kategoriID=23'), (u'Yalan Haber', u'http://dunyabizim.com/servisler/rss.php?kategoriID=40'), (u'Yeni \u015eeyler', u'http://dunyabizim.com/servisler/rss.php?kategoriID=57'), (u'Zekeriya Sofras\u0131', u'http://dunyabizim.com/servisler/rss.php?kategoriID=60')]


@@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1321194347(BasicNewsRecipe):
    title = u'D\xfcnya B\xfclteni'
    language = 'tr'
    __author__ = 'asalet_r'
    oldest_article = 7
    max_articles_per_feed = 50
    auto_cleanup = True
    feeds = [(u'Tarih Dosyas\u0131', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=157'), (u'R\xf6portaj', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=153'), (u'Makale-Yorum', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=174'), (u'K\xfclt\xfcr-Sanat', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=66'), (u'Hayat\u0131n \u0130\xe7inden', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=200'), (u'Haber Analiz', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=123'), (u'Gezi-\u0130zlenim', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=90'), (u'Aile Sa\u011fl\u0131k E\u011fitim', u'http://www.dunyabulteni.net/servisler/rss.php?kategoriID=75')]

recipes/haksoz.recipe Normal file

@@ -0,0 +1,11 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1324739199(BasicNewsRecipe):
    title = u'Haks\xf6z'
    oldest_article = 7
    max_articles_per_feed = 20
    auto_cleanup = True
    language = 'tr'
    __author__ = 'asalet_r'
    feeds = [(u'Haks\xf6z', u'http://www.haksozhaber.net/rss/')]

recipes/iktibas.recipe Normal file

@@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1324739406(BasicNewsRecipe):
    title = u'\u0130ktibas'
    language = 'tr'
    __author__ = 'asalet_r'
    oldest_article = 7
    max_articles_per_feed = 20
    auto_cleanup = True
    feeds = [(u'\u0130ktibas', u'http://www.iktibasdergisi.com/rss/rss.xml')]


@@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1324158549(BasicNewsRecipe):
    title = u'izdiham.com'
    language = 'tr'
    __author__ = 'asalet_r'
    oldest_article = 7
    max_articles_per_feed = 20
    auto_cleanup = True
    feeds = [(u'\u0130zdiham', u'http://www.izdiham.com/index.php/feed')]

recipes/lega_nerd.recipe Normal file

@@ -0,0 +1,14 @@
from calibre.web.feeds.news import BasicNewsRecipe

class AdvancedUserRecipe1326135232(BasicNewsRecipe):
    title = u'Lega Nerd'
    description = 'nerd / geek culture, pc, comics, music, culture'
    language = 'it'
    oldest_article = 7
    max_articles_per_feed = 100
    auto_cleanup = True
    feeds = [(u'Lega Nerd', u'http://feeds.feedburner.com/LegaNerd')]
    __author__ = 'faber1971'
    __version__ = 'v1.0'
    __date__ = '9, January 2011'


@@ -13,7 +13,7 @@ class Moscowtimes(BasicNewsRecipe):
     category = 'Russia, Moscow, Russian news, Moscow news, Russian newspaper, daily news, independent news, reliable news, USSR, Soviet Union, CIS, Russian politics, Russian business, Russian culture, Russian opinion, St Petersburg, Saint Petersburg'
     publisher = 'The Moscow Times'
     language = 'en'
-    oldest_article = 4
+    oldest_article = 2
     max_articles_per_feed = 100
     no_stylesheets = True
     use_embedded_content = False


@@ -6,7 +6,7 @@
 ## Copyright: Kiavash
 ##
 ## Written: Jan 2012
-## Last Edited: 2012-01-07
+## Last Edited: Jan 2012
 ##
 __license__ = 'GNU General Public License v3 - http://www.gnu.org/copyleft/gpl.html'
@@ -18,14 +18,16 @@ Microwave Journal Monthly Magazine
 You need to sign up (free) and get username/password.
 '''
 import re # Import the regular expressions module.
 from calibre.ptempfile import TemporaryFile # we need this for saving to a temp file
 from calibre.web.feeds.news import BasicNewsRecipe

 class MWJournal(BasicNewsRecipe):
     # Title to use for the ebook.
     title = u'Microwave Journal'
     __author__ = 'Kiavash'
+    language = 'en'
     #A brief description for the ebook.
     description = u'Microwave Journal web site ebook created using rss feeds.'
@@ -33,9 +35,8 @@ class MWJournal(BasicNewsRecipe):
     # Set publisher and publication type.
     publisher = 'Horizon House'
     publication_type = 'magazine'
-    language = 'en'
     oldest_article = 31 # monthly published magazine. Some months are 31 days!
     max_articles_per_feed = 100
     remove_empty_feeds = True
     auto_cleanup = True
@@ -44,43 +45,50 @@ class MWJournal(BasicNewsRecipe):
     no_stylesheets = True
     remove_javascript = True
-    needs_subscription = True # oh yeah... we need to login btw.
+    asciiize = True # Converts all none ascii characters to their ascii equivalents
+    needs_subscription = True # oh yeah... we need to login btw.
     # Timeout for fetching files from the server in seconds. The default of 120 seconds, seems somewhat excessive.
     timeout = 30
     # Specify extra CSS - overrides ALL other CSS (IE. Added last).
     extra_css = 'body { font-family: verdana, helvetica, sans-serif; } \
                  .introduction, .first { font-weight: bold; } \
                  .cross-head { font-weight: bold; font-size: 125%; } \
                  .cap, .caption { display: block; font-size: 80%; font-style: italic; } \
-                 .cap, .caption, .caption img, .caption span { display: block; text-align: center; margin: 5px auto; } \
+                 .cap, .caption, .caption img, .caption span { display: block; margin: 5px auto; } \
                  .byl, .byd, .byline img, .byline-name, .byline-title, .author-name, .author-position, \
                  .correspondent-portrait img, .byline-lead-in, .name, .bbc-role { display: block; \
-                 text-align: center; font-size: 80%; font-style: italic; margin: 1px auto; } \
+                 font-size: 80%; font-style: italic; margin: 1px auto; } \
                  .story-date, .published { font-size: 80%; } \
                  table { width: 100%; } \
                  td img { display: block; margin: 5px auto; } \
                  ul { padding-top: 10px; } \
                  ol { padding-top: 10px; } \
                  li { padding-top: 5px; padding-bottom: 5px; } \
-                 h1 { text-align: center; font-size: 175%; font-weight: bold; } \
-                 h2 { text-align: center; font-size: 150%; font-weight: bold; } \
-                 h3 { text-align: center; font-size: 125%; font-weight: bold; } \
-                 h4, h5, h6 { text-align: center; font-size: 100%; font-weight: bold; }'
+                 h1 { font-size: 175%; font-weight: bold; } \
+                 h2 { font-size: 150%; font-weight: bold; } \
+                 h3 { font-size: 125%; font-weight: bold; } \
+                 h4, h5, h6 { font-size: 100%; font-weight: bold; }'
     remove_tags = [
                    dict(name='div', attrs={'class':'boxadzonearea350'}), # Removes banner ads
                    dict(name='font', attrs={'class':'footer'}), # remove fonts if you do like your fonts more! Comment out to use website's fonts
+                   dict(name='div', attrs={'class':'newsarticlead'})
                   ]
     # Remove various tag attributes to improve the look of the ebook pages.
     remove_attributes = [ 'border', 'cellspacing', 'align', 'cellpadding', 'colspan',
                          'valign', 'vspace', 'hspace', 'alt', 'width', 'height' ]
-    # Remove the line breaks,
+    # Remove the line breaks as well as href links. Books don't have links generally speaking
     preprocess_regexps = [(re.compile(r'<br[ ]*/>', re.IGNORECASE), lambda m: ''),
-                          (re.compile(r'<br[ ]*clear.*/>', re.IGNORECASE), lambda m: '')]
+                          (re.compile(r'<br[ ]*clear.*/>', re.IGNORECASE), lambda m: ''),
+                          (re.compile(r'<a.*?>'), lambda h1: ''),
+                          (re.compile(r'</a>'), lambda h2: '')
+                         ]
     # Select the feeds that you are interested.
     feeds = [
@@ -96,19 +104,20 @@ class MWJournal(BasicNewsRecipe):
     # The function is adapted from the Economist recipe
     def get_cover_url(self):
         cover_url = None
         cover_page_location = 'http://www.mwjournal.com/Journal/' # Cover image is located on this page
         soup = self.index_to_soup(cover_page_location)
         cover_item = soup.find('img',attrs={'src':lambda x: x and '/IssueImg/3_MWJ_CurrIss_CoverImg' in x}) # There are three files named cover, we want the highest resolution which is the 3rd image. So we look for the pattern. Remember that the name of the cover image changes every month so we cannot search for the complete name. Instead we are searching for the pattern
         if cover_item:
             cover_url = 'http://www.mwjournal.com' + cover_item['src'].strip() # yeah! we found it. Let's fetch the image file and pass it as cover to calibre
         return cover_url

     def print_version(self, url):
-        '''
-        this function uses the print version of the article. Replaces the URL with its print version and fetch that page instead.
-        '''
-        return url.replace('http://mwjournal.com/Journal/article.asp?HH_ID=', 'http://mwjournal.com/Journal/Print.asp?Id=')
+        if url.find('/Journal/article.asp?HH_ID=') >= 0:
+            return self.browser.open_novisit(url).geturl().replace('/Journal/article.asp?HH_ID=', '/Journal/Print.asp?Id=')
+        elif url.find('/News/article.asp?HH_ID=') >= 0:
+            return self.browser.open_novisit(url).geturl().replace('/News/article.asp?HH_ID=', '/Journal/Print.asp?Id=')
+        elif url.find('/Resources/TechLib.asp?HH_ID=') >= 0:
+            return self.browser.open_novisit(url).geturl().replace('/Resources/TechLib.asp?HH_ID=', '/Resources/PrintRessource.asp?Id=')

     def get_browser(self):
         '''
@@ -118,9 +127,9 @@ class MWJournal(BasicNewsRecipe):
         if self.username is not None and self.password is not None:
             url = ('http://www.omeda.com/cgi-win/mwjreg.cgi?m=login') # main login page.
             br.open(url) # fetch the 1st login page
             br.select_form('login') # finds the login form
             br['EMAIL_ADDRESS'] = self.username # fills the username
             br['PASSWORD'] = self.password # fills the password
             raw = br.submit().read() # submit the form and read the 2nd login form
             # save it to an htm temp file (from ESPN recipe written by Kovid Goyal kovid@kovidgoyal.net
             with TemporaryFile(suffix='.htm') as fname:
@@ -128,7 +137,7 @@ class MWJournal(BasicNewsRecipe):
                     f.write(raw)
                 br.open_local_file(fname)
                 br.select_form(nr=0) # finds submit on the 2nd form
                 didwelogin = br.submit().read() # submit it and read the return html
                 if 'Welcome ' not in didwelogin: # did it login successfully? Is Username/password correct?
                     raise Exception('Failed to login, are you sure your username and password are correct?')
                 #login is done
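
For readers who want to see what the newly added preprocess_regexps substitutions do, here is a small standalone sketch (editorial illustration, not part of the commit); the sample HTML fragment is invented for the example:

import re

# The same four substitutions the recipe now applies before parsing:
# drop <br/> variants and strip anchor tags while keeping their text.
link_and_break_regexps = [
    (re.compile(r'<br[ ]*/>', re.IGNORECASE), lambda m: ''),
    (re.compile(r'<br[ ]*clear.*/>', re.IGNORECASE), lambda m: ''),
    (re.compile(r'<a.*?>'), lambda m: ''),
    (re.compile(r'</a>'), lambda m: ''),
]

html = 'Read the <a href="http://example.com/note">app note</a> online.<br />'
for pattern, repl in link_and_break_regexps:
    html = pattern.sub(repl, html)
print(html)  # -> 'Read the app note online.'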

recipes/pambianco.recipe Normal file

@@ -0,0 +1,14 @@
from calibre.web.feeds.news import BasicNewsRecipe

class AdvancedUserRecipe1326135591(BasicNewsRecipe):
    title = u'Pambianco'
    description = 'fashion magazine for professional people'
    language = 'it'
    oldest_article = 7
    max_articles_per_feed = 100
    auto_cleanup = True
    feeds = [(u'Pambianco', u'http://feeds.feedburner.com/pambianconews/YGXu')]
    __author__ = 'faber1971'
    __version__ = 'v1.0'
    __date__ = '9, January 2011'


@@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1324913680(BasicNewsRecipe):
    title = u'Sivil Dusunce'
    language = 'tr'
    __author__ = 'asalet_r'
    oldest_article = 7
    max_articles_per_feed = 20
    auto_cleanup = True
    feeds = [(u'Sivil Dusunce', u'http://www.sivildusunce.com/feed/')]


@@ -0,0 +1,12 @@
from calibre.web.feeds.news import BasicNewsRecipe

class BasicUserRecipe1324739957(BasicNewsRecipe):
    title = u'Tasfiye Dergisi'
    language = 'tr'
    __author__ = 'asalet_r'
    oldest_article = 7
    max_articles_per_feed = 20
    auto_cleanup = True
    feeds = [(u'Tasfiye Dergisi', u'http://www.tasfiyedergisi.com/direnen-edebiyat/?feed=rss2')]


@@ -74,7 +74,7 @@ class ANDROID(USBMS):
             0x0001 : [0x0223, 0x9999],
             0x4e11 : [0x0100, 0x226, 0x227],
             0x4e12 : [0x0100, 0x226, 0x227],
-            0x4e21 : [0x0100, 0x226, 0x227],
+            0x4e21 : [0x0100, 0x226, 0x227, 0x231],
             0xb058 : [0x0222, 0x226, 0x227],
             0x0ff9 : [0x0226],
             },
@@ -175,7 +175,7 @@ class ANDROID(USBMS):
             'GT-S5830_CARD', 'GT-S5570_CARD', 'MB870', 'MID7015A',
             'ALPANDIGITAL', 'ANDROID_MID', 'VTAB1008', 'EMX51_BBG_ANDROI',
             'UMS', '.K080', 'P990', 'LTE', 'MB853', 'GT-S5660_CARD', 'A107',
-            'GT-I9003_CARD', 'XT912']
+            'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET']
     WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
             'FILE-STOR_GADGET', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
             'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',


@@ -68,8 +68,8 @@ def check_command_line_options(parser, args, log):
         raise SystemExit(1)
     output = args[2]
-    if output.startswith('.') and (output != '.' and not
-            output.startswith('..')):
+    if (output.startswith('.') and output[:2] not in {'..', '.'} and '/' not in
+            output and '\\' not in output):
         output = os.path.splitext(os.path.basename(input))[0]+output
     output = os.path.abspath(output)
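
As an illustration of the reworked extension check above, here is a small self-contained sketch (editorial, not from the commit) that applies the same rule to a few sample arguments; resolve_output and the sample file names are invented for the example:

import os

def resolve_output(input_path, output):
    # A bare extension such as '.epub' (not '.' or '..', no path separators)
    # is expanded using the input file's base name; anything else is used as given.
    if (output.startswith('.') and output[:2] not in {'..', '.'} and
            '/' not in output and '\\' not in output):
        output = os.path.splitext(os.path.basename(input_path))[0] + output
    return os.path.abspath(output)

print(resolve_output('book.html', '.epub'))     # -> <current dir>/book.epub
print(resolve_output('book.html', 'out.mobi'))  # ordinary name, used as given
print(resolve_output('book.html', '.'))         # bare '.', left unexpanded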


@@ -191,9 +191,9 @@ class CanonicalFragmentIdentifier
         if target.currentTime == undefined
             return
         if target.readyState == 4 or target.readyState == "complete"
-            target.currentTime = val
+            target.currentTime = val + 0
         else
-            fn = -> target.currentTime = val
+            fn = ()-> target.currentTime = val
             target.addEventListener("canplay", fn, false)
     #}}}

Binary file not shown.


@@ -114,6 +114,15 @@
 <p>Try clicking at different points along the image. Also try changing the magnification and then hitting reload.</p>
 <img src="marker.png" width="150" height="200" alt="Test Image" style="border: solid 1px black"/>
+
+<h2>Video</h2>
+<p>Try clicking on this video while it is playing. The page should
+reload with the video paused at the point it was at when you
+clicked. To play the video you should right click on it and select
+play (otherwise the click will cause a reload). This is currently
+broken because of issues in the python server use to serve test
+content. I lack the patience to track down the bug. </p>
+<video width="320" height="240" controls="controls" preload="auto" src="birds.mp4" type="video/mp4" />
 </div>
 <img id="marker" style="position: absolute; display:none; z-index:10" src="marker.png" alt="Marker" />
 </body>


@@ -3179,6 +3179,8 @@ class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
     def create_book_entry(self, mi, cover=None, add_duplicates=True,
             force_id=None):
+        if mi.tags:
+            mi.tags = list(mi.tags)
         self._add_newbook_tag(mi)
         if not add_duplicates and self.has_book(mi):
             return None
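
The two added lines guard against metadata whose tags arrive as an immutable or shared sequence; assuming _add_newbook_tag extends mi.tags in place (which the guard suggests), a minimal sketch of the failure it avoids, with invented tag values:

# Tags supplied by a caller as a tuple would break an in-place append.
tags = ('fiction', 'sf')
tags = list(tags)        # the new guard: copy into a mutable list first
tags.append('newbook')   # in-place mutation now succeeds
print(tags)              # -> ['fiction', 'sf', 'newbook']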


@@ -164,6 +164,7 @@ class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler): # {{{
 class HTTPD(SocketServer.TCPServer):
     allow_reuse_address = True
+    protocol_version = 'HTTP/1.1'
 # }}}