yahoo.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Yahoo (Web)
"""

from urllib.parse import unquote, urlencode

from lxml import html

from searx.utils import extract_text, extract_url, match_language, eval_xpath

# about
about = {
    "website": 'https://search.yahoo.com/',
    "wikidata_id": None,
    "official_api_documentation": 'https://developer.yahoo.com/api/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general']
paging = True
language_support = True
time_range_support = True

# search-url
base_url = 'https://search.yahoo.com/'
search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
search_url_with_time = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}&age={age}&btf={btf}&fr2=time'

supported_languages_url = 'https://search.yahoo.com/web/advanced'

# specific xpath variables
results_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' Sr ')]"
url_xpath = './/h3/a/@href'
title_xpath = './/h3/a'
content_xpath = './/div[contains(@class, "compText")]'
suggestion_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' AlsoTry ')]//a"

time_range_dict = {'day': ['1d', 'd'],
                   'week': ['1w', 'w'],
                   'month': ['1m', 'm']}
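# each value is the [age, btf] pair substituted into search_url_with_time,
# e.g. a 'week' search appends '&age=1w&btf=w&fr2=time' to the URL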

language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}


# remove yahoo-specific tracking-url
def parse_url(url_string):
    endings = ['/RS', '/RK']
    endpositions = []
    start = url_string.find('http', url_string.find('/RU=') + 1)

    for ending in endings:
        endpos = url_string.rfind(ending)
        if endpos > -1:
            endpositions.append(endpos)

    if start == 0 or len(endpositions) == 0:
        return url_string

    end = min(endpositions)
    return unquote(url_string[start:end])
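
# illustration with a hypothetical redirect: Yahoo links wrap the target as
# '.../RU=https%3a%2f%2fexample.org%2f/RK=2/RS=...'; parse_url() extracts
# the encoded URL after '/RU=' up to the first '/RK' or '/RS' marker and
# unquotes it, yielding 'https://example.org/'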


def _get_url(query, offset, language, time_range):
    if time_range in time_range_dict:
        return base_url + search_url_with_time.format(offset=offset,
                                                      query=urlencode({'p': query}),
                                                      lang=language,
                                                      age=time_range_dict[time_range][0],
                                                      btf=time_range_dict[time_range][1])
    return base_url + search_url.format(offset=offset,
                                        query=urlencode({'p': query}),
                                        lang=language)
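
# e.g. _get_url('test', 1, 'en', None) builds
# 'https://search.yahoo.com/search?p=test&b=1&fl=1&vl=lang_en'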


def _get_language(params):
    if params['language'] == 'all':
        return 'en'

    language = match_language(params['language'], supported_languages, language_aliases)
    if language not in language_aliases.values():
        language = language.split('-')[0]
    language = language.replace('-', '_').lower()

    return language
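
# for illustration, assuming match_language() (with supported_languages
# filled in by the engine loader) resolves 'zh-TW' to its alias 'zh-CHT':
# the alias is kept whole and lowercased to 'zh_cht', while a plain locale
# like 'fr-FR' is cut down to the bare language code 'fr'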


# do search-request
def request(query, params):
    if params['time_range'] and params['time_range'] not in time_range_dict:
        return params

    offset = (params['pageno'] - 1) * 10 + 1
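    # 'b' is the 1-based index of the first result on the requested page,
    # e.g. pageno=2 gives offset 11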
    language = _get_language(params)

    params['url'] = _get_url(query, offset, language, params['time_range'])

    # TODO required?
    params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
        .format(lang=language)

    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    try:
        results_num = int(eval_xpath(dom, '//div[@class="compPagination"]/span[last()]/text()')[0]
                          .split()[0].replace(',', ''))
        results.append({'number_of_results': results_num})
    except Exception:
        pass
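    # the last pagination span is expected to read something like
    # '1,060,000 results' (illustrative); its first whitespace-separated
    # token, with thousands separators removed, is the total result count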

    # parse results
    for result in eval_xpath(dom, results_xpath):
        try:
            url = parse_url(extract_url(eval_xpath(result, url_xpath), search_url))
            title = extract_text(eval_xpath(result, title_xpath)[0])
        except Exception:
            continue

        # guard against results without a text snippet, which would
        # otherwise raise an IndexError here
        content_nodes = eval_xpath(result, content_xpath)
        content = extract_text(content_nodes[0]) if content_nodes else ''

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # if no suggestion found, return results
    suggestions = eval_xpath(dom, suggestion_xpath)
    if not suggestions:
        return results

    # parse suggestion
    for suggestion in suggestions:
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = html.fromstring(resp.text)
    options = eval_xpath(dom, '//div[@id="yschlang"]/span/label/input')
    for option in options:
        code_parts = eval_xpath(option, './@value')[0][5:].split('_')
        if len(code_parts) == 2:
            code = code_parts[0] + '-' + code_parts[1].upper()
        else:
            code = code_parts[0]
        supported_languages.append(code)

    return supported_languages
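
# for illustration: a hypothetical input value of 'lang_pt_br' is stripped
# of its 'lang_' prefix and normalized to 'pt-BR', while 'lang_fr' becomes
# simply 'fr'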