  1. """
  2. Yahoo (Web)
  3. @website https://search.yahoo.com/web
  4. @provide-api yes (https://developer.yahoo.com/boss/search/),
  5. $0.80/1000 queries
  6. @using-api no (because pricing)
  7. @results HTML (using search portal)
  8. @stable no (HTML can change)
  9. @parse url, title, content, suggestion
  10. """

from urllib.parse import unquote, urlencode
from lxml import html
from searx.utils import extract_text, extract_url, match_language, eval_xpath

# engine dependent config
categories = ['general']
paging = True
language_support = True
time_range_support = True

# search-url
base_url = 'https://search.yahoo.com/'
search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
search_url_with_time = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}&age={age}&btf={btf}&fr2=time'

supported_languages_url = 'https://search.yahoo.com/web/advanced'

# specific xpath variables
results_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' Sr ')]"
url_xpath = './/h3/a/@href'
title_xpath = './/h3/a'
content_xpath = './/div[contains(@class, "compText")]'
suggestion_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' AlsoTry ')]//a"

time_range_dict = {'day': ['1d', 'd'],
                   'week': ['1w', 'w'],
                   'month': ['1m', 'm']}

language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}


# remove yahoo-specific tracking-url
def parse_url(url_string):
    endings = ['/RS', '/RK']
    endpositions = []
    # the target URL is percent-encoded after the '/RU=' marker;
    # find() returns -1 when the marker is missing, so start becomes 0
    start = url_string.find('http', url_string.find('/RU=') + 1)

    for ending in endings:
        endpos = url_string.rfind(ending)
        if endpos > -1:
            endpositions.append(endpos)

    if start == 0 or len(endpositions) == 0:
        return url_string
    else:
        end = min(endpositions)
        return unquote(url_string[start:end])
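
# Illustrative example (a made-up redirect URL in Yahoo's format, not a
# captured live link):
#   parse_url('https://r.search.yahoo.com/_ylt=A0/RU=https%3a%2f%2fexample.org%2f/RK=2/RS=abc')
#   -> 'https://example.org/'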


def _get_url(query, offset, language, time_range):
    if time_range in time_range_dict:
        return base_url + search_url_with_time.format(offset=offset,
                                                      query=urlencode({'p': query}),
                                                      lang=language,
                                                      age=time_range_dict[time_range][0],
                                                      btf=time_range_dict[time_range][1])
    return base_url + search_url.format(offset=offset,
                                        query=urlencode({'p': query}),
                                        lang=language)
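
# For example (values follow from the URL templates above):
#   _get_url('searx', 1, 'en', None)
#   -> 'https://search.yahoo.com/search?p=searx&b=1&fl=1&vl=lang_en'
#   _get_url('searx', 1, 'en', 'week')
#   -> 'https://search.yahoo.com/search?p=searx&b=1&fl=1&vl=lang_en&age=1w&btf=w&fr2=time'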


def _get_language(params):
    if params['language'] == 'all':
        return 'en'

    language = match_language(params['language'], supported_languages, language_aliases)
    if language not in language_aliases.values():
        language = language.split('-')[0]
    language = language.replace('-', '_').lower()

    return language
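
# Examples (the exact match depends on the fetched supported_languages):
#   'all'   -> 'en' (Yahoo offers no "any language" option)
#   'zh-TW' -> matched to the alias 'zh-CHT', sent to Yahoo as 'zh_cht'
#   'de-DE' -> no alias, so only the language part is kept: 'de'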


# do search-request
def request(query, params):
    if params['time_range'] and params['time_range'] not in time_range_dict:
        return params

    offset = (params['pageno'] - 1) * 10 + 1
    language = _get_language(params)

    params['url'] = _get_url(query, offset, language, params['time_range'])

    # TODO required?
    params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
        .format(lang=language)

    return params
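
# Sketch of the outcome for page 2 of an English query, assuming
# match_language() resolves 'en-US' to 'en' (params is pre-populated by
# searx; only the keys used above matter):
#   params['url']           == 'https://search.yahoo.com/search?p=searx&b=11&fl=1&vl=lang_en'
#   params['cookies']['sB'] == 'fl=1&vl=lang_en&sh=1&rw=new&v=1'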


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    try:
        results_num = int(eval_xpath(dom, '//div[@class="compPagination"]/span[last()]/text()')[0]
                          .split()[0].replace(',', ''))
        results.append({'number_of_results': results_num})
    except (IndexError, ValueError):
        pass

    # parse results
    for result in eval_xpath(dom, results_xpath):
        try:
            url = parse_url(extract_url(eval_xpath(result, url_xpath), search_url))
            title = extract_text(eval_xpath(result, title_xpath)[0])
        except Exception:
            # skip results without a parsable link or title
            continue

        # some results come without a description snippet
        content_nodes = eval_xpath(result, content_xpath)
        content = extract_text(content_nodes[0]) if content_nodes else ''

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # if no suggestion found, return results
    suggestions = eval_xpath(dom, suggestion_xpath)
    if not suggestions:
        return results

    # parse suggestions
    for suggestion in suggestions:
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    return results
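
# The returned list mixes entry types, which searx dispatches on by key,
# e.g. (placeholder values):
#   [{'number_of_results': 1230000},
#    {'url': 'https://example.org/', 'title': '...', 'content': '...'},
#    {'suggestion': 'related query'}]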


# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = html.fromstring(resp.text)
    options = eval_xpath(dom, '//div[@id="yschlang"]/span/label/input')
    for option in options:
        code_parts = eval_xpath(option, './@value')[0][5:].split('_')
        if len(code_parts) == 2:
            code = code_parts[0] + '-' + code_parts[1].upper()
        else:
            code = code_parts[0]
        supported_languages.append(code)

    return supported_languages
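
# The parsed option values look like 'lang_fr' or 'lang_zh_chs'; stripping
# the 'lang_' prefix ([5:]) yields codes such as 'fr' and 'zh-CHS'.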