http->https

This commit is contained in:
Kovid Goyal 2017-02-17 19:39:06 +05:30
parent 73b4ff84b7
commit 0858d97f82
2 changed files with 6 additions and 6 deletions

View File

@@ -645,7 +645,7 @@ class NYTimes(BasicNewsRecipe):
                 continue
             try:
                 soup = self.index_to_soup(
-                    'http://www.nytimes.com/pages/' + index_url + '/index.html')
+                    'https://www.nytimes.com/pages/' + index_url + '/index.html')
             except:
                 continue
             print 'Index URL: ' + 'http://www.nytimes.com/pages/' + index_url + '/index.html'
@@ -675,7 +675,7 @@ class NYTimes(BasicNewsRecipe):
     def parse_todays_index(self):
         soup = self.index_to_soup(
-            'http://www.nytimes.com/pages/todayspaper/index.html')
+            'https://www.nytimes.com/pages/todayspaper/index.html')
         skipping = False
         # Find each article
         for div in soup.findAll(True,
@@ -708,7 +708,7 @@ class NYTimes(BasicNewsRecipe):
     def parse_headline_index(self):
         soup = self.index_to_soup(
-            'http://www.nytimes.com/pages/todaysheadlines/')
+            'https://www.nytimes.com/pages/todaysheadlines/')
         pubdate = strftime('%a, %d %b')
         section = None
         articles = []

View File

@@ -653,7 +653,7 @@ class NYTimes(BasicNewsRecipe):
                 continue
             try:
                 soup = self.index_to_soup(
-                    'http://www.nytimes.com/pages/' + index_url + '/index.html')
+                    'https://www.nytimes.com/pages/' + index_url + '/index.html')
             except:
                 continue
             print 'Index URL: ' + 'http://www.nytimes.com/pages/' + index_url + '/index.html'
@@ -683,7 +683,7 @@ class NYTimes(BasicNewsRecipe):
     def parse_todays_index(self):
         soup = self.index_to_soup(
-            'http://www.nytimes.com/pages/todayspaper/index.html')
+            'https://www.nytimes.com/pages/todayspaper/index.html')
         skipping = False
         # Find each article
         for div in soup.findAll(True,
@@ -716,7 +716,7 @@ class NYTimes(BasicNewsRecipe):
     def parse_headline_index(self):
         soup = self.index_to_soup(
-            'http://www.nytimes.com/pages/todaysheadlines/')
+            'https://www.nytimes.com/pages/todaysheadlines/')
         pubdate = strftime('%a, %d %b')
         section = None
         articles = []