- """
- Bing (Web)
- @website https://www.bing.com
- @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
- max. 5000 query/month
- @using-api no (because of query limit)
- @results HTML (using search portal)
- @stable no (HTML can change)
- @parse url, title, content
- @todo publishedDate
- """
import re

from lxml import html

from searx import logger
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
from searx.utils import match_language, eval_xpath

logger = logger.getChild('bing engine')

# engine dependent config
categories = ['general']
paging = True
language_support = True
supported_languages_url = 'https://www.bing.com/account/general'
language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}

# search-url
base_url = 'https://www.bing.com/'
search_string = 'search?{query}&first={offset}'


def _get_offset_from_pageno(pageno):
    # Bing's "first" parameter is a 1-based result index; each page holds 10 results.
    return (pageno - 1) * 10 + 1
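
# A quick sanity check of the page-to-offset mapping (illustrative, not part of
# the engine's runtime behaviour):
#   _get_offset_from_pageno(1) == 1
#   _get_offset_from_pageno(2) == 11
#   _get_offset_from_pageno(3) == 21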


# do search-request
def request(query, params):
    # page numbers are 1-based, so default to the first page
    offset = _get_offset_from_pageno(params.get('pageno', 1))

    if params['language'] == 'all':
        lang = 'EN'
    else:
        # supported_languages is injected into the module by searx's engine loader
        lang = match_language(params['language'], supported_languages, language_aliases)

    # prepend a "language:" filter; query is a byte string at this point
    query = u'language:{} {}'.format(lang.split('-')[0].upper(), query.decode('utf-8')).encode('utf-8')

    search_path = search_string.format(
        query=urlencode({'q': query}),
        offset=offset)

    params['url'] = base_url + search_path

    return params
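
# Illustrative only: with query b'test', language 'fr-FR' and pageno 2, the
# request above should produce a URL along the lines of
#   https://www.bing.com/search?q=language%3AFR+test&first=11
# (the exact percent-encoding is whatever urlencode emits).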


# get response from search-request
def response(resp):
    results = []
    result_len = 0

    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
        link = eval_xpath(result, './/h3/a')[0]

        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(eval_xpath(result, './/p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # parse results again if nothing is found yet (Bing serves an alternative
    # markup variant with li.b_algo result containers)
    for result in eval_xpath(dom, '//li[@class="b_algo"]'):
        link = eval_xpath(result, './/h2/a')[0]

        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(eval_xpath(result, './/p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    try:
        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
        if "-" in result_len_container:
            # Strip the "from-to" prefix of a paginated count such as
            # "11-20 of 30"; this assumes "from" and "to" have the same
            # number of digits, so the total starts at find("-") * 2 + 2.
            result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]

        result_len_container = re.sub('[^0-9]', '', result_len_container)

        if len(result_len_container) > 0:
            result_len = int(result_len_container)
    except Exception as e:
        logger.debug('result error: %s', e)

    if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 1)) > result_len:
        return []

    results.append({'number_of_results': result_len})
    return results
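
# Worked example (illustrative): if the sb_count span reads
# "11-20 of 44,300,000 results", find("-") is 2, so the slice starts at index 6
# and drops "11-20 "; removing the non-digits then leaves 44300000.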


# get supported languages from their site
def _fetch_supported_languages(resp):
    lang_tags = set()

    setmkt = re.compile('setmkt=([^&]*)')
    dom = html.fromstring(resp.text)
    lang_links = eval_xpath(dom, "//li/a[contains(@href, 'setmkt')]")

    for a in lang_links:
        href = eval_xpath(a, './@href')[0]
        match = setmkt.search(href)
        l_tag = match.groups()[0]
        # normalize the market code to the usual "xx-XX" casing
        _lang, _nation = l_tag.split('-', 1)
        l_tag = _lang.lower() + '-' + _nation.upper()
        lang_tags.add(l_tag)

    return list(lang_tags)
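
# Illustrative only: an anchor such as <a href="/?setmkt=de-de&setlang=de">
# would be normalized to the tag 'de-DE' by the loop above.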