yahoo_news.py 1.6 KB

#!/usr/bin/env python
# Yahoo News engine for searx (Python 2 stdlib: urllib / urlparse)
from urllib import urlencode
from urlparse import unquote
from lxml import html
from searx.engines.xpath import extract_text, extract_url

# engine category
categories = ['news']

# search url and xpath selectors for the Yahoo News result page
search_url = 'http://news.search.yahoo.com/search?{query}&b={offset}'
results_xpath = '//div[@class="res"]'
url_xpath = './/h3/a/@href'
title_xpath = './/h3/a'
content_xpath = './/div[@class="abstr"]'
suggestion_xpath = '//div[@id="satat"]//a'

# the engine supports paging
paging = True
def request(query, params):
    # Yahoo News paginates with a 1-based result offset, 10 results per page
    offset = (params['pageno'] - 1) * 10 + 1

    if params['language'] == 'all':
        language = 'en'
    else:
        language = params['language'].split('_')[0]

    params['url'] = search_url.format(offset=offset,
                                      query=urlencode({'p': query}))

    # the sB cookie carries language and result-display preferences
    params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
        .format(lang=language)

    return params
def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    for result in dom.xpath(results_xpath):
        # result links are Yahoo redirect urls; the target url is the
        # percent-encoded segment between '/RU=' and '/RS'
        url_string = extract_url(result.xpath(url_xpath), search_url)
        start = url_string.find('http', url_string.find('/RU=') + 1)
        end = url_string.rfind('/RS')
        url = unquote(url_string[start:end])
        title = extract_text(result.xpath(title_xpath)[0])
        content = extract_text(result.xpath(content_xpath)[0])
        results.append({'url': url, 'title': title, 'content': content})

    if not suggestion_xpath:
        return results

    for suggestion in dom.xpath(suggestion_xpath):
        results.append({'suggestion': extract_text(suggestion)})

    return results
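
For context, a minimal sketch of how this request/response pair could be exercised outside of searx. The params keys (pageno, language, cookies) mirror what the engine reads above; driving the HTTP call manually with the requests library is an assumption for illustration, not part of this file (searx normally performs the request itself).

# standalone sketch, not part of yahoo_news.py
import requests
import yahoo_news

# build the outgoing request parameters the way the engine expects them
params = {'pageno': 1, 'language': 'en_US', 'cookies': {}}
params = yahoo_news.request('solar eclipse', params)

# perform the HTTP call by hand (searx would do this step)
resp = requests.get(params['url'], cookies=params['cookies'])

# parse the result page into result and suggestion dicts
for item in yahoo_news.response(resp):
    print(item.get('url') or item.get('suggestion'))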