# xpath.py
  1. from lxml import html
  2. from urllib import urlencode
  3. from urlparse import urlparse, urljoin
  4. from cgi import escape
  5. from lxml.etree import _ElementStringResult
# Engine configuration settings. All are None here and are presumably
# assigned externally (by an engine loader/config step) before request()
# and response() are called — TODO confirm against the caller.
search_url = None      # search url template containing a '{query}' placeholder
results_xpath = None   # xpath selecting each result node in the response page
url_xpath = None       # xpath for the result link, relative to a result node
content_xpath = None   # xpath for the result snippet text, relative to a result node
title_xpath = None     # xpath for the result title, relative to a result node
  11. def extract_url(xpath_results):
  12. url = ''
  13. parsed_search_url = urlparse(search_url)
  14. if type(xpath_results) == list:
  15. if not len(xpath_results):
  16. raise Exception('Empty url resultset')
  17. if type(xpath_results[0]) == _ElementStringResult:
  18. url = ''.join(xpath_results)
  19. if url.startswith('//'):
  20. url = parsed_search_url.scheme+url
  21. elif url.startswith('/'):
  22. url = urljoin(search_url, url)
  23. #TODO
  24. else:
  25. url = xpath_results[0].attrib.get('href')
  26. else:
  27. raise Exception('Cannot handle xpath url resultset')
  28. if not url.startswith('http://') or not url.startswith('https://'):
  29. url = 'http://'+url
  30. parsed_url = urlparse(url)
  31. if not parsed_url.netloc:
  32. raise Exception('Cannot parse url')
  33. return url
  34. def request(query, params):
  35. query = urlencode({'q': query})[2:]
  36. params['url'] = search_url.format(query=query)
  37. params['query'] = query
  38. return params
  39. def response(resp):
  40. results = []
  41. dom = html.fromstring(resp.text)
  42. query = resp.search_params['query']
  43. for result in dom.xpath(results_xpath):
  44. url = extract_url(result.xpath(url_xpath))
  45. title = ' '.join(result.xpath(title_xpath))
  46. content = escape(' '.join(result.xpath(content_xpath))).replace(query, '<b>{0}</b>'.format(query))
  47. results.append({'url': url, 'title': title, 'content': content})
  48. return results