# import htmlentitydefs
from codecs import getincrementalencoder
from HTMLParser import HTMLParser
from random import choice

from searx.version import VERSION_STRING
from searx import settings

import cStringIO
import csv
import os
import re

# data used by gen_useragent() to build a Firefox-like user agent string
ua_versions = ('29.0',
               '30.0',
               '31.0',
               '32.0',
               '33.0')

ua_os = ('Windows NT 6.3; WOW64',
         'X11; Linux x86_64',
         'X11; Linux x86')

ua = "Mozilla/5.0 ({os}) Gecko/20100101 Firefox/{version}"


def gen_useragent():
    # TODO
    return ua.format(os=choice(ua_os), version=choice(ua_versions))


def searx_useragent():
    return 'searx/{searx_version} {suffix}'.format(
        searx_version=VERSION_STRING,
        suffix=settings['server'].get('useragent_suffix', ''))
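

# Illustrative usage sketch (not part of the original module; the helper name
# is hypothetical): gen_useragent() picks a random Firefox-like string for
# outgoing engine requests, while searx_useragent() identifies this searx
# instance itself.
def _example_useragents():
    engine_headers = {'User-Agent': gen_useragent()}
    # e.g. "Mozilla/5.0 (X11; Linux x86_64) Gecko/20100101 Firefox/33.0"
    searx_headers = {'User-Agent': searx_useragent()}
    # e.g. "searx/<version> <configured useragent_suffix>"
    return engine_headers, searx_headers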


def highlight_content(content, query):
    """Wrap query matches inside a plain text snippet with a highlight span.

    Content that already contains HTML markup is returned unchanged.
    """
    if not content:
        return None
    # ignoring html contents
    # TODO better html content detection
    if content.find('<') != -1:
        return content

    query = query.decode('utf-8')
    if content.lower().find(query.lower()) > -1:
        query_regex = u'({0})'.format(re.escape(query))
        content = re.sub(query_regex, '<span class="highlight">\\1</span>',
                         content, flags=re.I | re.U)
    else:
        regex_parts = []
        for chunk in query.split():
            if len(chunk) == 1:
                regex_parts.append(u'\W+{0}\W+'.format(re.escape(chunk)))
            else:
                regex_parts.append(u'{0}'.format(re.escape(chunk)))
        query_regex = u'({0})'.format('|'.join(regex_parts))
        content = re.sub(query_regex, '<span class="highlight">\\1</span>',
                         content, flags=re.I | re.U)

    return content
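

# Illustrative usage sketch (not part of the original module; the helper name
# is hypothetical): highlight_content() marks up occurrences of the query in
# plain text result snippets.
def _example_highlight_content():
    # -> u'<span class="highlight">searx</span> is a metasearch engine'
    return highlight_content(u'searx is a metasearch engine', 'searx')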


class HTMLTextExtractor(HTMLParser):
    """Collect the text nodes of an HTML document, skipping the markup."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []

    def handle_data(self, d):
        self.result.append(d)

    def handle_charref(self, number):
        # numeric character reference, decimal (&#38;) or hexadecimal (&#x26;)
        if number[0] in (u'x', u'X'):
            codepoint = int(number[1:], 16)
        else:
            codepoint = int(number)
        self.result.append(unichr(codepoint))

    def handle_entityref(self, name):
        # codepoint = htmlentitydefs.name2codepoint[name]
        # self.result.append(unichr(codepoint))
        self.result.append(name)

    def get_text(self):
        return u''.join(self.result)


def html_to_text(html):
    """Return the text content of the html string, without tags."""
    s = HTMLTextExtractor()
    s.feed(html)
    return s.get_text()
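

# Illustrative usage sketch (not part of the original module; the helper name
# is hypothetical): html_to_text() drops tags and decodes numeric character
# references, while named entities are kept as their bare name (see
# handle_entityref above).
def _example_html_to_text():
    # -> u'Example & more'
    return html_to_text(u'<p>Example &#38; more</p>')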


class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = getincrementalencoder(encoding)()

    def writerow(self, row):
        unicode_row = []
        for col in row:
            if type(col) == str or type(col) == unicode:
                unicode_row.append(col.encode('utf-8').strip())
            else:
                unicode_row.append(col)
        self.writer.writerow(unicode_row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
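

# Illustrative usage sketch (not part of the original module; the helper name
# is hypothetical): UnicodeWriter feeds unicode rows through an in-memory
# buffer and writes encoded CSV data to the target stream.
def _example_unicode_writer():
    out = cStringIO.StringIO()
    writer = UnicodeWriter(out, encoding='utf-8')
    writer.writerow([u'title', u'url'])
    writer.writerows([[u'caf\xe9', u'https://example.org']])
    return out.getvalue()  # UTF-8 encoded CSV bytes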


def get_themes(root):
    """Returns available themes list."""

    static_path = os.path.join(root, 'static')
    static_names = set(os.listdir(static_path))
    templates_path = os.path.join(root, 'templates')
    templates_names = set(os.listdir(templates_path))

    themes = []
    for name in static_names.intersection(templates_names):
        themes += [name]
    return static_path, templates_path, themes
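

# Illustrative usage sketch (not part of the original module; the helper name
# and the root path are hypothetical): a theme is any name present in both
# <root>/static and <root>/templates.
def _example_get_themes():
    static_path, templates_path, themes = get_themes('/usr/local/searx/searx')
    return themes  # names shared by the static/ and templates/ directories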