bing.py

  1. """
  2. Bing (Web)
  3. @website https://www.bing.com
  4. @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
  5. max. 5000 query/month
  6. @using-api no (because of query limit)
  7. @results HTML (using search portal)
  8. @stable no (HTML can change)
  9. @parse url, title, content
  10. @todo publishedDate
  11. """
  12. from urllib import urlencode
  13. from lxml import html
  14. from requests import get
  15. from searx.engines.xpath import extract_text
  16. # engine dependent config
  17. categories = ['general']
  18. paging = True
  19. language_support = True
  20. supported_languages_url = 'https://www.bing.com/account/general'
  21. # search-url
  22. base_url = 'https://www.bing.com/'
  23. search_string = 'search?{query}&first={offset}'
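# Example of a fully expanded search URL (page 2 => first=11, since Bing's
# 'first' parameter is the 1-based index of the first result on the page):
#   https://www.bing.com/search?q=test&first=11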
# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10 + 1

    if params['language'] != 'all':
        query = u'language:{} {}'.format(params['language'].split('-')[0].upper(),
                                         query.decode('utf-8')).encode('utf-8')

    search_path = search_string.format(
        query=urlencode({'q': query}),
        offset=offset)

    params['url'] = base_url + search_path

    return params
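# A minimal usage sketch (assumed call pattern; searx passes a larger params
# dict, but only 'pageno' and 'language' are read here):
#
#   params = {'pageno': 2, 'language': 'en-US'}
#   params = request('searx', params)
#   # params['url'] ==
#   #   'https://www.bing.com/search?q=language%3AEN+searx&first=11'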
# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # try to extract the approximate result count; ignore it if Bing's
    # markup has changed and the count cannot be parsed
    try:
        results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
                                                 .split()[0].replace(',', ''))})
    except (IndexError, ValueError):
        pass

    # parse results
    for result in dom.xpath('//div[@class="sa_cc"]'):
        link = result.xpath('.//h3/a')[0]
        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # parse results again if nothing is found yet; Bing serves two different
    # result layouts, so typically only one of the two selectors matches
    for result in dom.xpath('//li[@class="b_algo"]'):
        link = result.xpath('.//h2/a')[0]
        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # return results
    return results
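# The parsed output is a flat list of dicts; the count entry is only present
# when the scrape above succeeded (values here are purely illustrative):
#
#   [{'number_of_results': 1230000},
#    {'url': 'https://example.com/', 'title': '...', 'content': '...'},
#    ...]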
# get supported languages from their site
def fetch_supported_languages():
    supported_languages = []
    # use a local name that does not shadow the module-level response()
    resp = get(supported_languages_url)
    dom = html.fromstring(resp.text)
    options = dom.xpath('//div[@id="limit-languages"]//input')
    for option in options:
        code = option.xpath('./@id')[0].replace('_', '-')
        supported_languages.append(code)

    return supported_languages
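# Usage sketch (requires network access to supported_languages_url):
#
#   languages = fetch_supported_languages()
#   # languages might look like ['ar', 'de-DE', 'en-US', ...] depending on
#   # what Bing currently lists on its settings page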