bing.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Bing (Web)

- https://github.com/searx/searx/issues/2019#issuecomment-648227442

"""

import re
from urllib.parse import urlencode, urlparse, parse_qs
from lxml import html

from searx.utils import eval_xpath, extract_text, match_language
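
# Note: ``logger`` and ``supported_languages`` are not defined in this module;
# searx's engine loader injects both names into the engine namespace at runtime.
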
about = {
    "website": 'https://www.bing.com',
    "wikidata_id": 'Q182496',
    "official_api_documentation": 'https://www.microsoft.com/en-us/bing/apis/bing-web-search-api',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
paging = True
time_range_support = False
safesearch = False
supported_languages_url = 'https://www.bing.com/account/general'
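# (searx fetches this page and hands the response to
# ``_fetch_supported_languages`` at the bottom of this module)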
language_aliases = {}

# search-url
base_url = 'https://www.bing.com/'

# initial query: https://www.bing.com/search?q=foo&search=&form=QBLH
initial_query = 'search?{query}&search=&form=QBLH'

# following queries: https://www.bing.com/search?q=foo&search=&first=11&FORM=PERE
page_query = 'search?{query}&search=&first={offset}&FORM=PERE'
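

# Bing paginates in steps of ten and expects a 1-based result offset in the
# ``first`` URL parameter: pageno 1 -> first=1, pageno 2 -> first=11, ...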
def _get_offset_from_pageno(pageno):
    return (pageno - 1) * 10 + 1


def request(query, params):
    offset = _get_offset_from_pageno(params.get('pageno', 1))

    # logger.debug("params['pageno'] --> %s", params.get('pageno'))
    # logger.debug(" offset --> %s", offset)

    search_string = page_query
    if offset == 1:
        search_string = initial_query

    if params['language'] == 'all':
        lang = 'EN'
    else:
        lang = match_language(params['language'], supported_languages, language_aliases)

    query = 'language:{} {}'.format(lang.split('-')[0].upper(), query)

    search_path = search_string.format(query=urlencode({'q': query}), offset=offset)
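    # e.g. for query "foo" on page 2 (offset 11) with language 'all' this yields
    #   search?q=language%3AEN+foo&search=&first=11&FORM=PERE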

    if offset > 1:
        referer = base_url + initial_query.format(query=urlencode({'q': query}))
        params['headers']['Referer'] = referer
        logger.debug("headers.Referer --> %s", referer)

    params['url'] = base_url + search_path
    params['headers']['Accept-Language'] = "en-US,en;q=0.5"
    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    return params


def response(resp):
    results = []
    result_len = 0

    dom = html.fromstring(resp.text)

    for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
        # Note: //div[@class="sa_cc"] no longer seems to match Bing's markup
        logger.debug('found //div[@class="sa_cc"] --> %s', result)
        link = eval_xpath(result, './/h3/a')[0]
        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(eval_xpath(result, './/p'))

        # append result
        results.append({'url': url, 'title': title, 'content': content})

    # parse results again if nothing is found yet
    for result in eval_xpath(dom, '//li[@class="b_algo"]'):
        link = eval_xpath(result, './/h2/a')[0]
        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(eval_xpath(result, './/p'))

        # append result
        results.append({'url': url, 'title': title, 'content': content})
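
    # The total hit count sits in the "sb_count" span, e.g. (assumed format)
    # "11-20 of 2,340,000 results".  ``find("-") * 2 + 2`` skips roughly the
    # "<from>-<to> " prefix, since both numbers have about the same digit
    # count; any leftover non-digits are dropped by the ``re.sub`` anyway.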
    try:
        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
        if "-" in result_len_container:
            # Remove the part "from-to" for paginated request ...
            result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
        result_len_container = re.sub('[^0-9]', '', result_len_container)
        if len(result_len_container) > 0:
            result_len = int(result_len_container)
    except Exception as e:  # pylint: disable=broad-except
        logger.debug('result error :\n%s', e)

    if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
        return []
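
    # by searx convention, a dict carrying only ``number_of_results`` reports
    # the engine's total hit count alongside the regular results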
    results.append({'number_of_results': result_len})
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):

    lang_tags = set()

    dom = html.fromstring(resp.text)
    lang_links = eval_xpath(dom, '//div[@id="language-section"]//li')

    for _li in lang_links:

        href = eval_xpath(_li, './/@href')[0]
        (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)
        query = parse_qs(query, keep_blank_values=True)

        # fmt: off
        setlang = query.get('setlang', [None, ])[0]
        if setlang is None:
            # skip links that carry no setlang parameter
            continue
        # example: 'mn-Cyrl-MN' --> ['mn', 'Cyrl-MN']
        lang, nation = (setlang.split('-', maxsplit=1) + [None,])[:2]  # fmt: skip
        # fmt: on
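        # padding with ``[None,]`` lets one-part tags unpack cleanly:
        #   ('mn'.split('-', maxsplit=1) + [None])[:2]         --> ['mn', None]
        #   ('mn-Cyrl-MN'.split('-', maxsplit=1) + [None])[:2] --> ['mn', 'Cyrl-MN']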

        tag = lang + '-' + nation if nation else lang
        lang_tags.add(tag)

    return list(lang_tags)