ahmia.py

  1. """
  2. Ahmia (Onions)
  3. @website http://msydqstlz2kzerdg.onion
  4. @provides-api no
  5. @using-api no
  6. @results HTML
  7. @stable no
  8. @parse url, title, content
  9. """
from urllib.parse import urlencode, urlparse, parse_qs
from lxml.html import fromstring
from searx.engines.xpath import extract_url, extract_text

# engine config
categories = ['onions']
paging = True
page_size = 10

# search url
search_url = 'http://msydqstlz2kzerdg.onion/search/?{query}'
time_range_support = True
time_range_dict = {'day': 1,
                   'week': 7,
                   'month': 30}

# xpaths
results_xpath = '//li[@class="result"]'
url_xpath = './h4/a/@href'
title_xpath = './h4/a[1]'
content_xpath = './/p[1]'
correction_xpath = '//*[@id="didYouMean"]//a'
number_of_results_xpath = '//*[@id="totalResults"]'
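
# Each result item is expected to look roughly like the following (an
# assumption reconstructed from the xpaths above, not copied from Ahmia's
# actual markup):
#
#   <li class="result">
#     <h4><a href="/search/redirect?redirect_url=...">title</a></h4>
#     <p>content snippet</p>
#   </li>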


def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))
    if params['time_range'] in time_range_dict:
        params['url'] += '&' + urlencode({'d': time_range_dict[params['time_range']]})
    return params
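
# Illustrative example (the query and values are made up): for query
# "hidden wiki" with time_range "week", request() builds
#     http://msydqstlz2kzerdg.onion/search/?q=hidden+wiki&d=7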


def response(resp):
    results = []
    dom = fromstring(resp.text)

    # Ahmia serves its full result list in one page; slice out only the
    # results belonging to the requested page (e.g. pageno=2 with
    # page_size=10 keeps all_results[10:20])
    first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
    all_results = dom.xpath(results_xpath)
    trimmed_results = all_results[first_result_index:first_result_index + page_size]

    # get results
    for result in trimmed_results:
        # Ahmia links point at its own redirect endpoint; extract the actual
        # target url from the redirect_url query parameter
        raw_url = extract_url(result.xpath(url_xpath), search_url)
        cleaned_url = parse_qs(urlparse(raw_url).query).get('redirect_url', [''])[0]

        title = extract_text(result.xpath(title_xpath))
        content = extract_text(result.xpath(content_xpath))

        results.append({'url': cleaned_url,
                        'title': title,
                        'content': content,
                        'is_onion': True})

    # get spelling corrections
    for correction in dom.xpath(correction_xpath):
        results.append({'correction': extract_text(correction)})

    # get number of results
    number_of_results = dom.xpath(number_of_results_xpath)
    if number_of_results:
        try:
            results.append({'number_of_results': int(extract_text(number_of_results))})
        except ValueError:
            pass

    return results
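

# A minimal smoke test, offered as a sketch rather than part of the upstream
# engine: it feeds a hand-written Ahmia-style HTML snippet through response()
# via a stub object exposing the two attributes the function reads (.text and
# .search_params). It assumes searx is importable so extract_url/extract_text
# resolve; the stub class, the snippet, and the onion address in it are
# invented for illustration.
if __name__ == '__main__':
    class _StubResponse:
        text = (
            '<html><body>'
            '<li class="result">'
            '<h4><a href="/search/redirect?redirect_url=http://example.onion/">'
            'Example onion service</a></h4>'
            '<p>Sample result snippet</p>'
            '</li>'
            '<span id="totalResults">1</span>'
            '</body></html>'
        )
        search_params = {'pageno': 1}

    # expected output: one result dict pointing at http://example.onion/,
    # followed by {'number_of_results': 1}
    for item in response(_StubResponse()):
        print(item)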