# bing.py
"""
 Bing (Web)

 @website     https://www.bing.com
 @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
              max. 5000 query/month
 @using-api   no (because of query limit)
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content
 @todo        publishedDate
"""
from urllib import urlencode

from lxml import html

from searx.engines.xpath import extract_text
  15. # engine dependent config
  16. categories = ['general']
  17. paging = True
  18. language_support = True
  19. # search-url
  20. base_url = 'https://www.bing.com/'
  21. search_string = 'search?{query}&first={offset}'
  22. # do search-request
  23. def request(query, params):
  24. offset = (params['pageno'] - 1) * 10 + 1
  25. if params['language'] != 'all':
  26. query = u'language:{} {}'.format(params['language'].split('-')[0].upper(),
  27. query.decode('utf-8')).encode('utf-8')
  28. search_path = search_string.format(
  29. query=urlencode({'q': query}),
  30. offset=offset)
  31. params['url'] = base_url + search_path
  32. return params
  33. # get response from search-request
  34. def response(resp):
  35. results = []
  36. dom = html.fromstring(resp.text)
  37. try:
  38. results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
  39. .split()[0].replace(',', ''))})
  40. except:
  41. pass
  42. # parse results
  43. for result in dom.xpath('//div[@class="sa_cc"]'):
  44. link = result.xpath('.//h3/a')[0]
  45. url = link.attrib.get('href')
  46. title = extract_text(link)
  47. content = extract_text(result.xpath('.//p'))
  48. # append result
  49. results.append({'url': url,
  50. 'title': title,
  51. 'content': content})
  52. # parse results again if nothing is found yet
  53. for result in dom.xpath('//li[@class="b_algo"]'):
  54. link = result.xpath('.//h2/a')[0]
  55. url = link.attrib.get('href')
  56. title = extract_text(link)
  57. content = extract_text(result.xpath('.//p'))
  58. # append result
  59. results.append({'url': url,
  60. 'title': title,
  61. 'content': content})
  62. # return results
  63. return results