Mirror of https://github.com/kovidgoyal/calibre.git
Commit f5335a41ce: Merge branch 'master' of https://github.com/t3d/calibre
@@ -24,20 +24,27 @@ class GN(BasicNewsRecipe):
         doc = html.fromstring(raw)
         page = doc.xpath('//div[@class="c"]//div[@class="search-result"]/div[1]/div[2]/h1//a/@href')
 
-        return page[4]
+        if time.strftime("%w") in ['3','4']:
+            return page[5]
+        else:
+            return page[4]
 
     def parse_index(self):
         soup = self.index_to_soup('http://gosc.pl' + self.find_last_issue())
         self.cover_url = 'http://www.gosc.pl' + soup.find('div',attrs={'class':'fl-w100 release-wp'}).findAll('a')[-4].contents[0]['src']
         feeds = []
-        # wstepniak
-        a = soup.find('div',attrs={'class':'release-wp-b'}).find('a')
+        enlisted = []
+        # editorial:
+        a = soup.find('div',attrs={'class':'release-wp-b'})
+        art = a.find('a')
         articles = [
-            {'title' : self.tag_to_string(a),
-             'url' : 'http://www.gosc.pl' + a['href']
+            {'title' : self.tag_to_string(art),
+             'url' : 'http://www.gosc.pl' + art['href'],
+             'description' : self.tag_to_string(a.find('p',attrs={'class':'b lead'}))
             }]
         feeds.append((u'Wstępniak',articles))
-        # kategorie
+        enlisted.append(articles[0].get('url'))
+        # columns:
         for addr in soup.findAll('a',attrs={'href':re.compile('kategoria')}):
             if addr.string != u'wszystkie artyku\u0142y z tej kategorii \xbb':
                 main_block = self.index_to_soup('http://www.gosc.pl' + addr['href'])
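Note on this hunk: find_last_issue() now picks a different entry from the XPath result list depending on the weekday, and the editorial ("Wstępniak") feed entry gains a description while its URL is stored in the new enlisted list for later de-duplication. Below is a minimal sketch of the weekday switch, with a hypothetical issue_links list standing in for the real XPath result:

import time

# Hypothetical stand-in for the href list returned by the XPath query.
issue_links = ['/s0', '/s1', '/s2', '/s3', '/last-issue', '/mid-week-entry']

def pick_issue(links):
    # time.strftime("%w") gives the weekday as '0' (Sunday) .. '6' (Saturday).
    # On Wednesday ('3') and Thursday ('4') the commit reads index 5 instead of 4,
    # presumably because the search-result list shifts by one entry mid-week.
    if time.strftime("%w") in ['3', '4']:
        return links[5]
    return links[4]

print(pick_issue(issue_links))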
@@ -45,6 +52,20 @@ class GN(BasicNewsRecipe):
                 if len(articles) > 0:
                     section = addr.string
                     feeds.append((section, articles))
+                    enlisted.extend(list(article.get('url') for article in articles))
+        # not assigned content:
+        page = 1
+        not_assigned = []
+        while True:
+            soup = self.index_to_soup('http://gosc.pl' + self.find_last_issue().replace('przeglad','wszystko') + '/' + str(page))
+            articles = list(self.find_articles(soup))
+            not_assigned.extend([ x for x in articles if x.get('url') not in enlisted ])
+            page+=1
+            pages = soup.find('span', attrs={'class':'pgr_nrs'})
+            if str(page) not in [self.tag_to_string(x)[1] for x in pages.findAll('a')]:
+                break
 
+        feeds.insert(1,(u'Nieprzypisane', not_assigned))
         return feeds
 
     def find_articles(self, main_block):
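Note on this hunk: a new 'Nieprzypisane' (unassigned) section pages through the issue's 'wszystko' (everything) listing and keeps only articles whose URLs were not already collected into enlisted; the loop ends once the next page number no longer appears in the pgr_nrs pager. A standalone sketch of that termination logic, using a hypothetical fetch_page helper instead of the real scraping code:

# fetch_page(n) -> (articles_on_page_n, page_numbers_listed_in_the_pager)
def collect_unassigned(fetch_page, enlisted):
    not_assigned = []
    page = 1
    while True:
        articles, pager_numbers = fetch_page(page)
        not_assigned.extend(a for a in articles if a['url'] not in enlisted)
        page += 1
        if str(page) not in pager_numbers:  # next page not offered, so stop
            break
    return not_assigned

# Toy data: two listing pages, one article already assigned elsewhere.
pages = {
    1: ([{'url': '/x'}, {'url': '/y'}], ['2']),
    2: ([{'url': '/z'}], []),
}
print(collect_unassigned(lambda n: pages[n], enlisted={'/y'}))  # [{'url': '/x'}, {'url': '/z'}]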
@@ -52,7 +73,9 @@ class GN(BasicNewsRecipe):
             art = a.find('a')
             yield {
                 'title' : self.tag_to_string(art),
-                'url' : 'http://www.gosc.pl' + art['href']
+                'url' : 'http://www.gosc.pl' + art['href'],
+                'date' : self.tag_to_string(a.find('p', attrs={'class':'sr-date'})),
+                'description' : self.tag_to_string(a.find('p', attrs={'class':'sr-lead'}))
             }
 
     def append_page(self, soup, appendtag):
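Note on this hunk: find_articles() now yields date and description alongside title and url, which matches the article-dictionary shape calibre's parse_index() is expected to return inside each (section, articles) pair. A small illustration of that structure with made-up values:

# Shape of the value returned by parse_index(); titles, URLs and dates
# below are made up purely for illustration.
feeds = [
    (u'Wstępniak', [
        {'title': 'Editorial headline',
         'url': 'http://www.gosc.pl/doc/0000000.Example',
         'description': 'Lead paragraph of the editorial'},
    ]),
    (u'Nieprzypisane', [
        {'title': 'An article without a category',
         'url': 'http://www.gosc.pl/doc/0000001.Example',
         'date': '07.10.2012',
         'description': 'Short lead taken from the search result'},
    ]),
]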
@@ -77,7 +100,7 @@ class GN(BasicNewsRecipe):
         for r in soup.findAll(attrs={'class':['di_dr', 'doc_image']}):
             del r['style']
         for r in soup.findAll(attrs={'class':'cm-i-a'}):
-            r.replaceWith( r.prettify() + '<div style="clear:both"></div>')
+            r.replaceWith( '<div style="clear:both"></div>' + r.prettify() + '<div style="clear:both"></div>')
         return soup
 
     keep_only_tags = [
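Note on this hunk: the cm-i-a blocks (image/caption boxes) are now wrapped in clearing divs on both sides rather than only after, so floats before the box get cleared as well. The recipe does this by concatenating strings around prettify(); an equivalent sketch in bs4 (the recipe itself uses calibre's bundled BeautifulSoup) on hypothetical markup:

from bs4 import BeautifulSoup

# Hypothetical markup standing in for a gosc.pl caption block.
soup = BeautifulSoup('<div><p class="cm-i-a">caption</p></div>', 'html.parser')
r = soup.find(attrs={'class': 'cm-i-a'})

# Surround the block with clearing divs on both sides,
# mirroring the effect of the updated replaceWith() call.
r.insert_before(soup.new_tag('div', style='clear:both'))
r.insert_after(soup.new_tag('div', style='clear:both'))
print(soup)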
@@ -88,6 +111,7 @@ class GN(BasicNewsRecipe):
         dict(name='p', attrs={'class':['r tr', 'l l-2', 'wykop', 'tags']}),
         dict(name='div', attrs={'class':['doc_actions', 'cf', 'fr1_cl']}),
         dict(name='div', attrs={'id':'vote'}),
+        dict(name='link'),
         dict(name='a', attrs={'class':'img_enlarge'})
     ]
 