author    Markus Heiser <markus.heiser@darmarit.de>  2021-12-27 09:26:22 +0100
committer Markus Heiser <markus.heiser@darmarit.de>  2021-12-27 09:26:22 +0100
commit    3d96a9839a12649874b6d4cf9466bd3616b0a03c (patch)
tree      e7d54d1e345b1e792d538ddc250f4827bb2fd9b9 /searx/results.py
parent    fcdc2c2cd26e24c2aa3f064d93cee3e29dc2a30c (diff)
[format.python] initial formatting of the python code

This patch was generated by black [1]::

    make format.python

[1] https://github.com/psf/black

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
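The rewrites below were applied mechanically by black, so the hunks can be reviewed for layout alone. A minimal sketch of the per-file transformation; the 120-character line length and the disabled string normalization are assumptions inferred from this diff (long joined lines, single quotes kept), not stated in the commit::

    import black

    # a backslash-continued conditional expression, as in the first hunk below
    src = (
        "path_a = url_a.path[:-1]\\\n"
        "    if url_a.path.endswith('/')\\\n"
        "    else url_a.path\n"
    )

    # black drops the continuations once the expression fits on one line
    mode = black.Mode(line_length=120, string_normalization=False)  # assumed settings
    print(black.format_str(src, mode=mode))
    # -> path_a = url_a.path[:-1] if url_a.path.endswith('/') else url_a.path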
Diffstat (limited to 'searx/results.py')
 searx/results.py | 65 +++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/searx/results.py b/searx/results.py
index 10a26aa3..6ab751c5 100644
--- a/searx/results.py
+++ b/searx/results.py
@@ -47,12 +47,8 @@ def compare_urls(url_a, url_b):
         return False
 
     # remove / from the end of the url if required
-    path_a = url_a.path[:-1]\
-        if url_a.path.endswith('/')\
-        else url_a.path
-    path_b = url_b.path[:-1]\
-        if url_b.path.endswith('/')\
-        else url_b.path
+    path_a = url_a.path[:-1] if url_a.path.endswith('/') else url_a.path
+    path_b = url_b.path[:-1] if url_b.path.endswith('/') else url_b.path
 
     return unquote(path_a) == unquote(path_b)
 
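The one-liners black produced here only strip a trailing '/'; together with the host and query checks earlier in the function, that is what lets compare_urls() treat such URLs as equal. A short illustration with hypothetical values::

    from urllib.parse import urlparse

    a = urlparse('https://example.com/path/')
    b = urlparse('https://example.com/path')
    # both paths normalize to '/path', so compare_urls(a, b) -> True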
@@ -83,8 +79,9 @@ def merge_two_infoboxes(infobox1, infobox2):
             parsed_url2 = urlparse(url2.get('url', ''))
             entity_url2 = url2.get('entity')
             for url1 in urls1:
-                if (entity_url2 is not None and url1.get('entity') == entity_url2)\
-                        or compare_urls(urlparse(url1.get('url', '')), parsed_url2):
+                if (entity_url2 is not None and url1.get('entity') == entity_url2) or compare_urls(
+                    urlparse(url1.get('url', '')), parsed_url2
+                ):
                     unique_url = False
                     break
             if unique_url:
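For reference, the condition black re-joined here treats url2 as a duplicate when its 'entity' tag or an equivalent URL already occurs in urls1. A toy example with hypothetical data::

    urls1 = [{'url': 'https://example.org/', 'entity': 'official website'}]
    url2 = {'url': 'http://example.org', 'entity': 'official website'}
    # the 'entity' values match, so unique_url flips to False and
    # url2 is not appended to urls1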
@@ -115,8 +112,7 @@ def merge_two_infoboxes(infobox1, infobox2):
                 attributeSet.add(entity)
 
         for attribute in infobox2.get('attributes', []):
-            if attribute.get('label') not in attributeSet\
-                    and attribute.get('entity') not in attributeSet:
+            if attribute.get('label') not in attributeSet and attribute.get('entity') not in attributeSet:
                 attributes1.append(attribute)
 
     if 'content' in infobox2:
@@ -144,9 +140,22 @@ def result_score(result):
 class ResultContainer:
     """docstring for ResultContainer"""
 
-    __slots__ = '_merged_results', 'infoboxes', 'suggestions', 'answers', 'corrections', '_number_of_results',\
-                '_closed', 'paging', 'unresponsive_engines', 'timings', 'redirect_url', 'engine_data', 'on_result',\
-                '_lock'
+    __slots__ = (
+        '_merged_results',
+        'infoboxes',
+        'suggestions',
+        'answers',
+        'corrections',
+        '_number_of_results',
+        '_closed',
+        'paging',
+        'unresponsive_engines',
+        'timings',
+        'redirect_url',
+        'engine_data',
+        'on_result',
+        '_lock',
+    )
 
     def __init__(self):
         super().__init__()
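Both the old and the new declaration build the same tuple; the exploded, trailing-comma layout black emits just keeps future one-name additions to single-line diffs. A toy sketch of slotted-attribute behavior::

    class Slotted:
        # same semantics as a comma-separated one-liner, easier to diff
        __slots__ = (
            'x',
            'y',
        )

    s = Slotted()
    s.x = 1  # declared in __slots__, assignment works
    # s.z = 1 would raise AttributeError: slotted instances have no __dict__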
@@ -208,8 +217,7 @@ class ResultContainer:
         if engine_name in engines:
             histogram_observe(standard_result_count, 'engine', engine_name, 'result', 'count')
 
-        if not self.paging and standard_result_count > 0 and engine_name in engines\
-                and engines[engine_name].paging:
+        if not self.paging and standard_result_count > 0 and engine_name in engines and engines[engine_name].paging:
             self.paging = True
 
     def _merge_infobox(self, infobox):
@@ -248,8 +256,7 @@ class ResultContainer:
         return True
 
     def _normalize_url_result(self, result):
-        """Return True if the result is valid
-        """
+        """Return True if the result is valid"""
         result['parsed_url'] = urlparse(result['url'])
 
         # if the result has no scheme, use http as default
@@ -280,8 +287,9 @@ class ResultContainer:
         for merged_result in self._merged_results:
             if 'parsed_url' not in merged_result:
                 continue
-            if compare_urls(result['parsed_url'], merged_result['parsed_url'])\
-                    and result_template == merged_result.get('template'):
+            if compare_urls(result['parsed_url'], merged_result['parsed_url']) and result_template == merged_result.get(
+                'template'
+            ):
                 if result_template != 'images.html':
                     # not an image, same template, same url : it's a duplicate
                     return merged_result
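Note how black wraps a condition that still exceeds the line length: it splits inside the trailing call rather than re-introducing backslashes. The same pattern on purely hypothetical names::

    records = {'template': 'images.html'}  # hypothetical stand-ins
    template_name = 'images.html'

    def looks_equivalent(a, b):  # hypothetical helper
        return a == b

    # black keeps the condition on one line up to the limit, then breaks
    # inside the last call, mirroring the hunk above
    ok = looks_equivalent(template_name, 'images.html') and template_name == records.get(
        'template'
    )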
@@ -294,8 +302,7 @@ class ResultContainer:
 
     def __merge_duplicated_http_result(self, duplicated, result, position):
         # using content with more text
-        if result_content_len(result.get('content', '')) >\
-                result_content_len(duplicated.get('content', '')):
+        if result_content_len(result.get('content', '')) > result_content_len(duplicated.get('content', '')):
             duplicated['content'] = result['content']
 
         # merge all result's parameters not found in duplicate
@@ -341,18 +348,20 @@ class ResultContainer:
                 res['category'] = engine.categories[0] if len(engine.categories) > 0 else ''
 
             # FIXME : handle more than one category per engine
-            category = res['category']\
-                + ':' + res.get('template', '')\
-                + ':' + ('img_src' if 'img_src' in res or 'thumbnail' in res else '')
+            category = (
+                res['category']
+                + ':'
+                + res.get('template', '')
+                + ':'
+                + ('img_src' if 'img_src' in res or 'thumbnail' in res else '')
+            )
 
-            current = None if category not in categoryPositions\
-                else categoryPositions[category]
+            current = None if category not in categoryPositions else categoryPositions[category]
 
             # group with previous results using the same category
             # if the group can accept more result and is not too far
             # from the current position
-            if current is not None and (current['count'] > 0)\
-                and (len(gresults) - current['index'] < 20):
+            if current is not None and (current['count'] > 0) and (len(gresults) - current['index'] < 20):
                 # group with the previous results using
                 # the same category with this one
                 index = current['index']
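With hypothetical values, the grouping key assembled above looks like this; results group together only when all three parts (category, template, image flag) match::

    res = {'category': 'images', 'template': 'images.html', 'img_src': 'http://example.org/img.png'}
    category = (
        res['category']
        + ':'
        + res.get('template', '')
        + ':'
        + ('img_src' if 'img_src' in res or 'thumbnail' in res else '')
    )
    assert category == 'images:images.html:img_src'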