# annas_archive.py
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. # lint: pylint
  3. """.. _annas_archive engine:
  4. ==============
  5. Anna's Archive
  6. ==============
  7. .. _Anna's Archive: https://annas-archive.org/
  8. .. _AnnaArchivist: https://annas-software.org/AnnaArchivist/annas-archive
  9. `Anna's Archive`_ is a free non-profit online shadow library metasearch engine
  10. providing access to a variety of book resources (also via IPFS), created by a
  11. team of anonymous archivists (AnnaArchivist_).
  12. .. contents:: Contents
  13. :depth: 2
  14. :local:
  15. :backlinks: entry
  16. Configuration
  17. =============
  18. The engine has the following additional settings:
  19. - :py:obj:`aa_content`
  20. - :py:obj:`aa_ext`
  21. - :py:obj:`aa_sort`
  22. With this options a SearXNG maintainer is able to configure **additional**
  23. engines for specific searches in Anna's Archive. For example a engine to search
  24. for *newest* articles and journals (PDF) / by shortcut ``!aaa <search-term>``.
  25. .. code:: yaml
  26. - name: annas articles
  27. engine: annas_archive
  28. shortcut: aaa
  29. aa_content: 'journal_article'
  30. aa_ext: 'pdf'
  31. aa_sort: 'newest'
  32. Implementations
  33. ===============
  34. """
  35. from typing import List, Dict, Any, Optional
  36. from urllib.parse import quote
  37. from lxml import html
  38. from searx.utils import extract_text, eval_xpath, eval_xpath_list
  39. from searx.enginelib.traits import EngineTraits
  40. from searx.data import ENGINE_TRAITS
  41. # about
  42. about: Dict[str, Any] = {
  43. "website": "https://annas-archive.org/",
  44. "wikidata_id": "Q115288326",
  45. "official_api_documentation": None,
  46. "use_official_api": False,
  47. "require_api_key": False,
  48. "results": "HTML",
  49. }
  50. # engine dependent config
  51. categories: List[str] = ["files"]
  52. paging: bool = False
  53. # search-url
  54. base_url: str = "https://annas-archive.org"
  55. aa_content: str = ""
  56. """Anan's search form field **Content** / possible values::
  57. journal_article, book_any, book_fiction, book_unknown, book_nonfiction,
  58. book_comic, magazine, standards_document
  59. To not filter use an empty string (default).
  60. """
  61. aa_sort: str = ''
  62. """Sort Anna's results, possible values::
  63. newest, oldest, largest, smallest
  64. To sort by *most relevant* use an empty string (default)."""
  65. aa_ext: str = ''
  66. """Filter Anna's results by a file ending. Common filters for example are
  67. ``pdf`` and ``epub``.
  68. .. note::
  69. Anna's Archive is a beta release: Filter results by file extension does not
  70. really work on Anna's Archive.
  71. """
  72. def init(engine_settings=None): # pylint: disable=unused-argument
  73. """Check of engine's settings."""
  74. traits = EngineTraits(**ENGINE_TRAITS['annas archive'])
  75. if aa_content and aa_content not in traits.custom['content']:
  76. raise ValueError(f'invalid setting content: {aa_content}')
  77. if aa_sort and aa_sort not in traits.custom['sort']:
  78. raise ValueError(f'invalid setting sort: {aa_sort}')
  79. if aa_ext and aa_ext not in traits.custom['ext']:
  80. raise ValueError(f'invalid setting ext: {aa_ext}')
  81. def request(query, params: Dict[str, Any]) -> Dict[str, Any]:
  82. q = quote(query)
  83. lang = traits.get_language(params["language"], traits.all_locale) # type: ignore
  84. params["url"] = base_url + f"/search?lang={lang or ''}&content={aa_content}&ext={aa_ext}&sort={aa_sort}&q={q}"
  85. return params
  86. def response(resp) -> List[Dict[str, Optional[str]]]:
  87. results: List[Dict[str, Optional[str]]] = []
  88. dom = html.fromstring(resp.text)
  89. for item in eval_xpath_list(dom, '//main//div[contains(@class, "h-[125]")]/a'):
  90. results.append(_get_result(item))
  91. # The rendering of the WEB page is very strange; except the first position
  92. # all other positions of Anna's result page are enclosed in SGML comments.
  93. # These comments are *uncommented* by some JS code, see query of class
  94. # '.js-scroll-hidden' in Anna's HTML template:
  95. # https://annas-software.org/AnnaArchivist/annas-archive/-/blob/main/allthethings/templates/macros/md5_list.html
  96. for item in eval_xpath_list(dom, '//main//div[contains(@class, "js-scroll-hidden")]'):
  97. item = html.fromstring(item.xpath('./comment()')[0].text)
  98. results.append(_get_result(item))
  99. return results
  100. def _get_result(item):
  101. return {
  102. 'template': 'paper.html',
  103. 'url': base_url + item.xpath('./@href')[0],
  104. 'title': extract_text(eval_xpath(item, './/h3/text()[1]')),
  105. 'publisher': extract_text(eval_xpath(item, './/div[contains(@class, "text-sm")]')),
  106. 'authors': [extract_text(eval_xpath(item, './/div[contains(@class, "italic")]'))],
  107. 'content': extract_text(eval_xpath(item, './/div[contains(@class, "text-xs")]')),
  108. 'img_src': item.xpath('.//img/@src')[0],
  109. }
  110. def fetch_traits(engine_traits: EngineTraits):
  111. """Fetch languages and other search arguments from Anna's search form."""
  112. # pylint: disable=import-outside-toplevel
  113. import babel
  114. from searx.network import get # see https://github.com/searxng/searxng/issues/762
  115. from searx.locales import language_tag
  116. engine_traits.all_locale = ''
  117. engine_traits.custom['content'] = []
  118. engine_traits.custom['ext'] = []
  119. engine_traits.custom['sort'] = []
  120. resp = get(base_url + '/search')
  121. if not resp.ok: # type: ignore
  122. raise RuntimeError("Response from Anna's search page is not OK.")
  123. dom = html.fromstring(resp.text) # type: ignore
  124. # supported language codes
  125. lang_map = {}
  126. for x in eval_xpath_list(dom, "//form//select[@name='lang']//option"):
  127. eng_lang = x.get("value")
  128. if eng_lang in ('', '_empty', 'nl-BE', 'und'):
  129. continue
  130. try:
  131. locale = babel.Locale.parse(lang_map.get(eng_lang, eng_lang), sep='-')
  132. except babel.UnknownLocaleError:
  133. # silently ignore unknown languages
  134. # print("ERROR: %s -> %s is unknown by babel" % (x.get("data-name"), eng_lang))
  135. continue
  136. sxng_lang = language_tag(locale)
  137. conflict = engine_traits.languages.get(sxng_lang)
  138. if conflict:
  139. if conflict != eng_lang:
  140. print("CONFLICT: babel %s --> %s, %s" % (sxng_lang, conflict, eng_lang))
  141. continue
  142. engine_traits.languages[sxng_lang] = eng_lang
  143. for x in eval_xpath_list(dom, "//form//select[@name='content']//option"):
  144. engine_traits.custom['content'].append(x.get("value"))
  145. for x in eval_xpath_list(dom, "//form//select[@name='ext']//option"):
  146. engine_traits.custom['ext'].append(x.get("value"))
  147. for x in eval_xpath_list(dom, "//form//select[@name='sort']//option"):
  148. engine_traits.custom['sort'].append(x.get("value"))