bing_news.py

  1. """
  2. Bing (News)
  3. @website https://www.bing.com/news
  4. @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
  5. max. 5000 query/month
  6. @using-api no (because of query limit)
  7. @results RSS (using search portal)
  8. @stable yes (except perhaps for the images)
  9. @parse url, title, content, publishedDate, thumbnail
  10. """
from datetime import datetime
from dateutil import parser
from urllib.parse import urlencode, urlparse, parse_qsl
from lxml import etree
from lxml.etree import XPath

from searx.utils import match_language, eval_xpath_getindex
from searx.engines.bing import language_aliases
from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA  # pylint: disable=unused-import

# engine dependent config
categories = ['news']
paging = True
language_support = True
time_range_support = True

# search-url
base_url = 'https://www.bing.com/'
search_string = 'news/search?{query}&first={offset}&format=RSS'
search_string_with_time = 'news/search?{query}&first={offset}&qft=interval%3d"{interval}"&format=RSS'
time_range_dict = {'day': '7',
                   'week': '8',
                   'month': '9'}


# remove the bing click-tracking redirect and return the real target url
def url_cleanup(url_string):
    parsed_url = urlparse(url_string)
    if parsed_url.netloc == 'www.bing.com' and parsed_url.path == '/news/apiclick.aspx':
        query = dict(parse_qsl(parsed_url.query))
        return query.get('url', None)

    return url_string
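
# Illustrative example (not executed by the engine; the sample URLs are made up):
#   url_cleanup('https://www.bing.com/news/apiclick.aspx?ref=FexRss&url=https%3A%2F%2Fexample.org%2Fstory')
#   -> 'https://example.org/story'
# Any other URL is returned unchanged.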


# replace the http://*bing4.com/th?id=... by https://www.bing.com/th?id=...
def image_url_cleanup(url_string):
    parsed_url = urlparse(url_string)
    if parsed_url.netloc.endswith('bing4.com') and parsed_url.path == '/th':
        query = dict(parse_qsl(parsed_url.query))
        return "https://www.bing.com/th?id=" + query.get('id')

    return url_string
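
# Illustrative example (not executed by the engine; the sample URL is made up):
#   image_url_cleanup('http://www.bing4.com/th?id=ON.123abc&pid=News')
#   -> 'https://www.bing.com/th?id=ON.123abc'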


def _get_url(query, language, offset, time_range):
    if time_range in time_range_dict:
        search_path = search_string_with_time.format(
            query=urlencode({'q': query, 'setmkt': language}),
            offset=offset,
            interval=time_range_dict[time_range])
    else:
        # e.g. setmkt=de-de&setlang=de
        search_path = search_string.format(
            query=urlencode({'q': query, 'setmkt': language}),
            offset=offset)
    return base_url + search_path
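
# Illustrative example (values are made up): _get_url('searx', 'en-US', 1, None)
# returns something like
#   https://www.bing.com/news/search?q=searx&setmkt=en-US&first=1&format=RSS
# and with time_range='week' the path additionally carries qft=interval%3d"8".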


# do search-request
def request(query, params):
    if params['time_range'] and params['time_range'] not in time_range_dict:
        return params

    offset = (params['pageno'] - 1) * 10 + 1

    if params['language'] == 'all':
        language = 'en-US'
    else:
        language = match_language(params['language'], supported_languages, language_aliases)

    params['url'] = _get_url(query, language, offset, params['time_range'])

    return params
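
# Illustrative example (assuming the usual searx params dict and that
# supported_languages has been populated by the engine loader): with
#   params = {'pageno': 2, 'language': 'en-US', 'time_range': 'week'}
# the offset becomes 11 and params['url'] points at the second RSS page,
# restricted to results from the last week.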


# get response from search-request
def response(resp):
    results = []

    rss = etree.fromstring(resp.content)
    ns = rss.nsmap

    # parse results
    for item in rss.xpath('./channel/item'):
        # url / title / content
        url = url_cleanup(eval_xpath_getindex(item, './link/text()', 0, default=None))
        title = eval_xpath_getindex(item, './title/text()', 0, default=url)
        content = eval_xpath_getindex(item, './description/text()', 0, default='')

        # publishedDate
        publishedDate = eval_xpath_getindex(item, './pubDate/text()', 0, default=None)
        try:
            publishedDate = parser.parse(publishedDate, dayfirst=False)
        except TypeError:
            publishedDate = datetime.now()
        except ValueError:
            publishedDate = datetime.now()

        # thumbnail
        thumbnail = eval_xpath_getindex(item, XPath('./News:Image/text()', namespaces=ns), 0, default=None)
        if thumbnail is not None:
            thumbnail = image_url_cleanup(thumbnail)

        # append result
        if thumbnail is not None:
            results.append({'url': url,
                            'title': title,
                            'publishedDate': publishedDate,
                            'content': content,
                            'img_src': thumbnail})
        else:
            results.append({'url': url,
                            'title': title,
                            'publishedDate': publishedDate,
                            'content': content})

    # return results
    return results
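

# Minimal standalone sketch (assumptions: searx normally drives this engine itself,
# supported_languages is injected by the engine loader, and `fetch` stands for any
# HTTP client returning an object with a .content attribute):
#
#   params = request('searx', {'pageno': 1, 'language': 'en-US', 'time_range': None})
#   resp = fetch(params['url'])
#   results = response(resp)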