# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""DuckDuckGo Lite
"""

import json
from lxml import html

from searx.utils import (
    dict_subset,
    eval_xpath,
    eval_xpath_getindex,
    extract_text,
    match_language,
)
from searx import network
from searx.enginelib.traits import EngineTraits

# ``logger`` and ``supported_languages`` are not defined in this module: the
# SearXNG engine loader injects both names at runtime.
traits: EngineTraits

# about
about = {
    "website": 'https://lite.duckduckgo.com/lite/',
    "wikidata_id": 'Q12805',
    "official_api_documentation": 'https://duckduckgo.com/api',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
paging = True
supported_languages_url = 'https://duckduckgo.com/util/u588.js'
time_range_support = True
send_accept_language_header = True

language_aliases = {
    'ar-SA': 'ar-XA',
    'es-419': 'es-XL',
    'ja': 'jp-JP',
    'ko': 'kr-KR',
    'sl-SI': 'sl-SL',
    'zh-TW': 'tzh-TW',
    'zh-HK': 'tzh-HK',
}

# values for the 'df' (time range) parameter and cookie, see request() below
time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}

# search-url
url = 'https://lite.duckduckgo.com/lite/'
url_ping = 'https://duckduckgo.com/t/sl_l'


# match query's language to a region code that duckduckgo will accept
def get_region_code(lang, lang_list=None):
    if lang == 'all':
        return None

    lang_code = match_language(lang, lang_list or [], language_aliases, 'wt-WT')
    lang_parts = lang_code.split('-')

    # country code goes first
    return lang_parts[1].lower() + '-' + lang_parts[0].lower()
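
# Example: lang='en-US' yields the region code 'us-en' (country code first),
# which is the format DuckDuckGo's 'kl' parameter expects; lang='all' disables
# the region filter entirely (the function returns None).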


def request(query, params):

    params['url'] = url
    params['method'] = 'POST'
    params['data']['q'] = query

    # The API is not documented, so we do some reverse engineering and emulate
    # what https://lite.duckduckgo.com/lite/ does when you press the "next
    # page" link again and again ..

    params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'
    params['headers']['Referer'] = 'https://google.com/'

    # initial page does not have an offset
    if params['pageno'] == 2:
        # second page does have an offset of 30
        offset = (params['pageno'] - 1) * 30
        params['data']['s'] = offset
        params['data']['dc'] = offset + 1

    elif params['pageno'] > 2:
        # third and following pages have an offset of 30 + n*50
        offset = 30 + (params['pageno'] - 2) * 50
        params['data']['s'] = offset
        params['data']['dc'] = offset + 1

    # the initial page does not carry additional data in its input form
    if params['pageno'] > 1:
        # requests for the second and following pages need the 'o' and 'api'
        # arguments
        params['data']['o'] = 'json'
        params['data']['api'] = 'd.js'

    if params['pageno'] > 2:
        # requests for the third and following pages need some more arguments
        params['data']['nextParams'] = ''
        params['data']['v'] = ''
        params['data']['vqd'] = ''
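
    # Worked example (derived from the offset rules above; the protocol is
    # reverse engineered and may change): a request for page 3 posts roughly
    #
    #   q=<query>&s=80&dc=81&o=json&api=d.js&nextParams=&v=&vqd=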

    region_code = get_region_code(params['language'], supported_languages)
    if region_code:
        params['data']['kl'] = region_code
        params['cookies']['kl'] = region_code

    params['data']['df'] = ''
    if params['time_range'] in time_range_dict:
        params['data']['df'] = time_range_dict[params['time_range']]
        params['cookies']['df'] = time_range_dict[params['time_range']]

    logger.debug("param data: %s", params['data'])
    logger.debug("param cookies: %s", params['cookies'])

    return params
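

# request() and response() form the engine interface: SearXNG fills a params
# dict (pageno, language, time_range, headers, data, cookies, ...), calls
# request() to shape the outgoing POST and later response() to parse the reply.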


# get response from search-request
def response(resp):

    # ping DuckDuckGo's /t/sl_l endpoint with the headers of the search
    # request, mirroring what the Lite page itself does
    headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
    network.get(url_ping, headers=headers_ping)

    if resp.status_code == 303:
        return []

    results = []
    doc = html.fromstring(resp.text)

    result_table = eval_xpath(doc, '//html/body/form/div[@class="filters"]/table')
    if len(result_table) < 3:
        # no more results
        return []
    result_table = result_table[2]

    tr_rows = eval_xpath(result_table, './/tr')
    # the last <tr> contains the form with the 'previous/next page' links
    tr_rows = tr_rows[:-1]

    len_tr_rows = len(tr_rows)
    offset = 0
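
    # Each result appears to occupy a group of four <tr> rows (title, snippet,
    # URL line, separator); the layout is reverse engineered from the Lite
    # page and may change. The loop below therefore advances in steps of four.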

    while len_tr_rows >= offset + 4:

        # assemble the table rows we need to scrape
        tr_title = tr_rows[offset]
        tr_content = tr_rows[offset + 1]
        offset += 4

        # ignore sponsored ads <tr class="result-sponsored">
        if tr_content.get('class') == 'result-sponsored':
            continue

        a_tag = eval_xpath_getindex(tr_title, './/td//a[@class="result-link"]', 0, None)
        if a_tag is None:
            continue

        td_content = eval_xpath_getindex(tr_content, './/td[@class="result-snippet"]', 0, None)
        if td_content is None:
            continue

        results.append(
            {
                'title': a_tag.text_content(),
                'content': extract_text(td_content),
                'url': a_tag.get('href'),
            }
        )

    return results


# get supported languages from their site
def _fetch_supported_languages(resp):

    # response is a js file with regions as an embedded object
    response_page = resp.text
    response_page = response_page[response_page.find('regions:{') + 8 :]
    response_page = response_page[: response_page.find('}') + 1]

    regions_json = json.loads(response_page)
    supported_languages = map((lambda x: x[3:] + '-' + x[:2].upper()), regions_json.keys())

    return list(supported_languages)
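
# The keys of the regions object are DuckDuckGo tags like 'us-en'; the lambda
# above reorders them into language tags like 'en-US' (slice [3:] is the
# language, slice [:2] the upper-cased territory).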


def fetch_traits(engine_traits: EngineTraits):
    """Fetch regions from DuckDuckGo."""
    # pylint: disable=import-outside-toplevel

    engine_traits.data_type = 'supported_languages'  # deprecated

    import babel
    from searx.locales import region_tag

    engine_traits.all_locale = 'wt-wt'

    resp = network.get('https://duckduckgo.com/util/u588.js')
    if not resp.ok:
        print("ERROR: response from DuckDuckGo is not OK.")

    # the response is a JS file; extract the embedded regions object
    pos = resp.text.find('regions:{') + 8
    js_code = resp.text[pos:]
    pos = js_code.find('}') + 1
    regions = json.loads(js_code[:pos])
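
    # regions maps DuckDuckGo tags to display names, roughly:
    #   {"wt-wt": "All Regions", "us-en": "United States", "de-de": "Germany", ...}
    # (sampled from the live u588.js; the exact contents may change)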

    # DuckDuckGo tags that do not map 1:1 onto babel locales
    reg_map = {
        'tw-tzh': 'zh_TW',
        'hk-tzh': 'zh_HK',
        'ct-ca': 'skip',  # ct-ca and es-ca both map to ca_ES
        'es-ca': 'ca_ES',
        'id-en': 'id_ID',
        'no-no': 'nb_NO',
        'jp-jp': 'ja_JP',
        'kr-kr': 'ko_KR',
        'xa-ar': 'ar_SA',
        'sl-sl': 'sl_SI',
        'th-en': 'th_TH',
        'vn-en': 'vi_VN',
    }
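
    # tags missing from reg_map follow the generic pattern below, e.g. the
    # DuckDuckGo tag 'de-de' (territory first) becomes the babel locale 'de_DE'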

    for eng_tag, name in regions.items():

        if eng_tag == 'wt-wt':
            engine_traits.all_locale = 'wt-wt'
            continue

        region = reg_map.get(eng_tag)
        if region == 'skip':
            continue

        if not region:
            eng_territory, eng_lang = eng_tag.split('-')
            region = eng_lang + '_' + eng_territory.upper()

        try:
            sxng_tag = region_tag(babel.Locale.parse(region))
        except babel.UnknownLocaleError:
            print("ERROR: %s (%s) -> %s is unknown by babel" % (name, eng_tag, region))
            continue

        conflict = engine_traits.regions.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue

        engine_traits.regions[sxng_tag] = eng_tag