bing.py 2.3 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788
  1. """
  2. Bing (Web)
  3. @website https://www.bing.com
  4. @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
  5. max. 5000 queries/month
  6. @using-api no (because of query limit)
  7. @results HTML (using search portal)
  8. @stable no (HTML can change)
  9. @parse url, title, content
  10. @todo publishedDate
  11. """
  12. from urllib import urlencode
  13. from cgi import escape
  14. from lxml import html
  15. from searx.engines.xpath import extract_text
# engine dependent config
categories = ['general']  # searx result categories this engine serves
paging = True             # engine honours the `pageno` request parameter
language_support = True   # engine honours the `language` request parameter

# search-url
base_url = 'https://www.bing.com/'
# query-string template: {query} is the urlencoded q/setmkt pair,
# {offset} is the 1-based index of the first result to return
search_string = 'search?{query}&first={offset}'
  23. # do search-request
  24. def request(query, params):
  25. offset = (params['pageno'] - 1) * 10 + 1
  26. if params['language'] == 'all':
  27. language = 'en-US'
  28. else:
  29. language = params['language'].replace('_', '-')
  30. search_path = search_string.format(
  31. query=urlencode({'q': query, 'setmkt': language}),
  32. offset=offset)
  33. params['cookies']['SRCHHPGUSR'] = \
  34. 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
  35. params['url'] = base_url + search_path
  36. return params
  37. # get response from search-request
  38. def response(resp):
  39. results = []
  40. dom = html.fromstring(resp.text)
  41. try:
  42. results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
  43. .split()[0].replace(',', ''))})
  44. except:
  45. pass
  46. # parse results
  47. for result in dom.xpath('//div[@class="sa_cc"]'):
  48. link = result.xpath('.//h3/a')[0]
  49. url = link.attrib.get('href')
  50. title = extract_text(link)
  51. content = escape(extract_text(result.xpath('.//p')))
  52. # append result
  53. results.append({'url': url,
  54. 'title': title,
  55. 'content': content})
  56. # parse results again if nothing is found yet
  57. for result in dom.xpath('//li[@class="b_algo"]'):
  58. link = result.xpath('.//h2/a')[0]
  59. url = link.attrib.get('href')
  60. title = extract_text(link)
  61. content = escape(extract_text(result.xpath('.//p')))
  62. # append result
  63. results.append({'url': url,
  64. 'title': title,
  65. 'content': content})
  66. # return results
  67. return results