"""
 Bing (Web)

 @website     https://www.bing.com
 @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
              max. 5000 query/month

 @using-api   no (because of query limit)
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content

 @todo        publishedDate
"""
from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text

# engine dependent config
categories = ['general']
paging = True
language_support = True
supported_languages_url = 'https://www.bing.com/account/general'

# search-url
base_url = 'https://www.bing.com/'
search_string = 'search?{query}&first={offset}'

# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10 + 1

    if params['language'] != 'all':
        query = u'language:{} {}'.format(params['language'].split('-')[0].upper(),
                                         query.decode('utf-8')).encode('utf-8')

    search_path = search_string.format(
        query=urlencode({'q': query}),
        offset=offset)

    params['url'] = base_url + search_path

    return params
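
# Example (illustrative, not executed): with params = {'pageno': 2, 'language': 'fr-FR'}
# and query = 'time', offset is (2 - 1) * 10 + 1 = 11, the query is rewritten to
# 'language:FR time', and params['url'] becomes
# 'https://www.bing.com/search?q=language%3AFR+time&first=11'.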

# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # total result count, when Bing exposes it in the sb_count element
    try:
        results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
                        .split()[0].replace(',', ''))})
    except:
        pass

    # parse results
    for result in dom.xpath('//div[@class="sa_cc"]'):
        link = result.xpath('.//h3/a')[0]

        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # parse results again if nothing is found yet
    for result in dom.xpath('//li[@class="b_algo"]'):
        link = result.xpath('.//h2/a')[0]

        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # return results
    return results
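
# Example (illustrative): each parsed entry appended by response() has the form
# {'url': 'https://example.org/', 'title': 'Example', 'content': 'snippet text'},
# plus an optional leading {'number_of_results': <int>} entry when the sb_count
# element could be parsed.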

# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = html.fromstring(resp.text)
    options = dom.xpath('//div[@id="limit-languages"]//input')
    for option in options:
        code = option.xpath('./@id')[0].replace('_', '-')
        supported_languages.append(code)

    return supported_languages
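
# Illustrative flow (assumed parameter shapes; in practice searx's engine
# framework drives these calls and performs the HTTP request itself):
#   params = request('searx', {'pageno': 1, 'language': 'all'})
#   # ... searx fetches params['url'] and passes the HTTP response object ...
#   results = response(resp)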