yahoo_news.py 2.9 KB

  1. ## Yahoo (News)
  2. #
  3. # @website https://news.yahoo.com
  4. # @provide-api yes (https://developer.yahoo.com/boss/search/), $0.80/1000 queries
  5. #
  6. # @using-api no (because pricing)
  7. # @results HTML (using search portal)
  8. # @stable no (HTML can change)
  9. # @parse url, title, content, publishedDate
  10. from urllib import urlencode
  11. from lxml import html
  12. from searx.engines.xpath import extract_text, extract_url
  13. from searx.engines.yahoo import parse_url
  14. from datetime import datetime, timedelta
  15. import re
  16. from dateutil import parser
# engine dependent config
categories = ['news']      # this engine serves the 'news' category
paging = True              # result offset is passed via the b= parameter
language_support = True    # language is forwarded via vl=lang_<code>

# search-url
# {query} is the urlencoded p= parameter, {offset} the 1-based result index,
# {lang} a two-letter language code
search_url = 'https://news.search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'

# specific xpath variables
results_xpath = '//div[@class="res"]'                 # one node per search hit
url_xpath = './/h3/a/@href'                           # relative to a result node
title_xpath = './/h3/a'
content_xpath = './/div[@class="abstr"]'              # abstract / snippet
publishedDate_xpath = './/span[@class="timestamp"]'   # relative or absolute date text
suggestion_xpath = '//div[@id="satat"]//a'
  30. # do search-request
  31. def request(query, params):
  32. offset = (params['pageno'] - 1) * 10 + 1
  33. if params['language'] == 'all':
  34. language = 'en'
  35. else:
  36. language = params['language'].split('_')[0]
  37. params['url'] = search_url.format(offset=offset,
  38. query=urlencode({'p': query}),
  39. lang=language)
  40. # TODO required?
  41. params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
  42. .format(lang=language)
  43. return params
  44. # get response from search-request
  45. def response(resp):
  46. results = []
  47. dom = html.fromstring(resp.text)
  48. # parse results
  49. for result in dom.xpath(results_xpath):
  50. url = parse_url(extract_url(result.xpath(url_xpath), search_url))
  51. title = extract_text(result.xpath(title_xpath)[0])
  52. content = extract_text(result.xpath(content_xpath)[0])
  53. # parse publishedDate
  54. publishedDate = extract_text(result.xpath(publishedDate_xpath)[0])
  55. if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
  56. publishedDate = datetime.now() - timedelta(minutes=int(re.match(r'\d+', publishedDate).group())) # noqa
  57. else:
  58. if re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$",
  59. publishedDate):
  60. timeNumbers = re.findall(r'\d+', publishedDate)
  61. publishedDate = datetime.now()\
  62. - timedelta(hours=int(timeNumbers[0]))\
  63. - timedelta(minutes=int(timeNumbers[1]))
  64. else:
  65. publishedDate = parser.parse(publishedDate)
  66. if publishedDate.year == 1900:
  67. publishedDate = publishedDate.replace(year=datetime.now().year)
  68. # append result
  69. results.append({'url': url,
  70. 'title': title,
  71. 'content': content,
  72. 'publishedDate': publishedDate})
  73. # return results
  74. return results