# wikidata.py
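"""
 Wikidata engine for searx.

 request() searches the Special:ItemDisambiguation page for items whose
 label matches the query; response() collects the matching item ids, loads
 their details in a single wbgetentities API call and renders the first
 result_count items as infoboxes (description, attributes, related urls).
"""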
import json

from searx import logger
from searx.poolrequests import get
from searx.utils import format_date_by_locale

from datetime import datetime
from dateutil.parser import parse as dateutil_parse
from urllib import urlencode
from lxml.html import fromstring

logger = logger.getChild('wikidata')

result_count = 1

wikidata_host = 'https://www.wikidata.org'
url_search = wikidata_host \
    + '/wiki/Special:ItemDisambiguation?{query}'

wikidata_api = wikidata_host + '/w/api.php'
url_detail = wikidata_api\
    + '?action=wbgetentities&format=json'\
    + '&props=labels%7Cinfo%7Csitelinks'\
    + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
    + '&{query}'

url_map = 'https://www.openstreetmap.org/'\
    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'

url_entity_label = wikidata_api\
    + '?action=wbgetentities&format=json&props=labels&{query}'

wikidata_ids_xpath = '//div/ul[@class="wikibase-disambiguation"]/li/a/@title'
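

# search for items whose label matches the query, in the user's language
# ('all' is mapped to english)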
def request(query, params):
    language = params['language'].split('_')[0]
    if language == 'all':
        language = 'en'

    params['url'] = url_search.format(
        query=urlencode({'label': query,
                         'language': language}))
    return params
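

# scrape the wikidata ids from the disambiguation page, then fetch the
# details of every item in a single wbgetentities call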
def response(resp):
    results = []
    html = fromstring(resp.content)
    wikidata_ids = html.xpath(wikidata_ids_xpath)

    language = resp.search_params['language'].split('_')[0]
    if language == 'all':
        language = 'en'

    url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
                                             'languages': language + '|en'}))
    htmlresponse = get(url)
    jsonresponse = json.loads(htmlresponse.content)

    for wikidata_id in wikidata_ids[:result_count]:
        results = results + getDetail(jsonresponse, wikidata_id, language,
                                      resp.search_params['language'])

    return results
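

# build the searx results for one entity of the wbgetentities answer:
# a plain link for the official website plus, usually, an infobox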
def getDetail(jsonresponse, wikidata_id, language, locale):
    results = []
    urls = []
    attributes = []

    result = jsonresponse.get('entities', {}).get(wikidata_id, {})

    title = result.get('labels', {}).get(language, {}).get('value', None)
    if title is None:
        title = result.get('labels', {}).get('en', {}).get('value', None)
    if title is None:
        return results

    description = result\
        .get('descriptions', {})\
        .get(language, {})\
        .get('value', None)
    if description is None:
        description = result\
            .get('descriptions', {})\
            .get('en', {})\
            .get('value', '')

    claims = result.get('claims', {})
    official_website = get_string(claims, 'P856', None)
    if official_website is not None:
        urls.append({'title': get_label('P856', language), 'url': official_website})
        results.append({'title': title, 'url': official_website})
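
    # wikipedia sitelinks in the user's language with an english fallback;
    # if neither exists, take the first available wikipedia sitelink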
    wikipedia_link_count = 0
    wikipedia_link = get_wikilink(result, language + 'wiki')
    wikipedia_link_count += add_url(urls,
                                    'Wikipedia (' + language + ')',
                                    wikipedia_link)
    if language != 'en':
        wikipedia_en_link = get_wikilink(result, 'enwiki')
        wikipedia_link_count += add_url(urls,
                                        'Wikipedia (en)',
                                        wikipedia_en_link)

    if wikipedia_link_count == 0:
        misc_language = get_wiki_firstlanguage(result, 'wiki')
        if misc_language is not None:
            add_url(urls,
                    'Wikipedia (' + misc_language + ')',
                    get_wikilink(result, misc_language + 'wiki'))

    if language != 'en':
        add_url(urls,
                'Wiki voyage (' + language + ')',
                get_wikilink(result, language + 'wikivoyage'))

    add_url(urls,
            'Wiki voyage (en)',
            get_wikilink(result, 'enwikivoyage'))

    if language != 'en':
        add_url(urls,
                'Wikiquote (' + language + ')',
                get_wikilink(result, language + 'wikiquote'))

    add_url(urls,
            'Wikiquote (en)',
            get_wikilink(result, 'enwikiquote'))

    add_url(urls,
            'Commons wiki',
            get_wikilink(result, 'commonswiki'))

    # Location
    add_url(urls,
            get_label('P625', language),
            get_geolink(claims, 'P625', None))

    add_url(urls,
            'Wikidata',
            'https://www.wikidata.org/wiki/'
            + wikidata_id + '?uselang=' + language)
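
    # MusicBrainz ids: work, artist, release group, label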
    musicbrainz_work_id = get_string(claims, 'P435')
    if musicbrainz_work_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/work/'
                + musicbrainz_work_id)

    musicbrainz_artist_id = get_string(claims, 'P434')
    if musicbrainz_artist_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/artist/'
                + musicbrainz_artist_id)

    musicbrainz_release_group_id = get_string(claims, 'P436')
    if musicbrainz_release_group_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/release-group/'
                + musicbrainz_release_group_id)

    musicbrainz_label_id = get_string(claims, 'P966')
    if musicbrainz_label_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/label/'
                + musicbrainz_label_id)

    # musicbrainz_area_id = get_string(claims, 'P982')
    # P1407 MusicBrainz series ID
    # P1004 MusicBrainz place ID
    # P1330 MusicBrainz instrument ID
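
    # infobox attributes: postal code, date of birth, date of death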
    postal_code = get_string(claims, 'P281', None)
    if postal_code is not None:
        attributes.append({'label': get_label('P281', language), 'value': postal_code})

    date_of_birth = get_time(claims, 'P569', locale, None)
    if date_of_birth is not None:
        attributes.append({'label': get_label('P569', language), 'value': date_of_birth})

    date_of_death = get_time(claims, 'P570', locale, None)
    if date_of_death is not None:
        attributes.append({'label': get_label('P570', language), 'value': date_of_death})
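
    # with no attributes, no description and only two collected urls,
    # return a plain link result instead of a mostly empty infobox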
    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
        results.append({
            'url': urls[0]['url'],
            'title': title,
            'content': description
        })
    else:
        results.append({
            'infobox': title,
            'id': wikipedia_link,
            'content': description,
            'attributes': attributes,
            'urls': urls
        })

    return results
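

# append {'title': title, 'url': url} to urls if url is set; returns 1 when
# a link was added, 0 otherwise (used to count the wikipedia links above)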
def add_url(urls, title, url):
    if url is not None:
        urls.append({'title': title, 'url': url})
        return 1
    else:
        return 0
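

# mainsnak of the first statement of a property, or None if the entity
# carries no such property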
def get_mainsnak(claims, propertyName):
    propValue = claims.get(propertyName, {})
    if len(propValue) == 0:
        return None

    propValue = propValue[0].get('mainsnak', None)
    return propValue
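

# first string value found in the claims of a property,
# or defaultValue when there is none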
def get_string(claims, propertyName, defaultValue=None):
    propValue = claims.get(propertyName, {})
    if len(propValue) == 0:
        return defaultValue

    result = []
    for e in propValue:
        mainsnak = e.get('mainsnak', {})
        datavalue = mainsnak.get('datavalue', {})
        if datavalue is not None:
            result.append(datavalue.get('value', ''))

    if len(result) == 0:
        return defaultValue
    else:
        # TODO handle multiple urls
        return result[0]
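

# time value of a property's claims, formatted for the locale; dates that
# cannot be parsed (e.g. negative years) are returned as their date part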
def get_time(claims, propertyName, locale, defaultValue=None):
    propValue = claims.get(propertyName, {})
    if len(propValue) == 0:
        return defaultValue

    result = []
    for e in propValue:
        mainsnak = e.get('mainsnak', {})
        datavalue = mainsnak.get('datavalue', {})
        if datavalue is not None:
            value = datavalue.get('value', '')
            result.append(value.get('time', ''))

    if len(result) == 0:
        return defaultValue

    date_string = ', '.join(result)
    try:
        parsed_date = datetime.strptime(date_string, "+%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        if date_string.startswith('-'):
            return date_string.split('T')[0]
        try:
            parsed_date = dateutil_parse(date_string, fuzzy=False)
        except (ValueError, OverflowError):
            logger.debug('could not parse date %s', date_string)
            return date_string.split('T')[0]

    return format_date_by_locale(parsed_date, locale)
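

# openstreetmap url for a globe-coordinate claim (e.g. P625);
# the zoom level is deduced from the coordinate precision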
def get_geolink(claims, propertyName, defaultValue=''):
    mainsnak = get_mainsnak(claims, propertyName)
    if mainsnak is None:
        return defaultValue

    datatype = mainsnak.get('datatype', '')
    datavalue = mainsnak.get('datavalue', {})
    if datatype != 'globe-coordinate':
        return defaultValue

    value = datavalue.get('value', {})
    precision = value.get('precision', 0.0002)

    # there is no zoom information, deduce from precision (error prone)
    # samples :
    # 13 --> 5
    # 1 --> 6
    # 0.016666666666667 --> 9
    # 0.00027777777777778 --> 19
    # wolframalpha :
    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777, 19}}
    # 14.1186-8.8322 x+0.625447 x^2
    if precision < 0.0003:
        zoom = 19
    else:
        zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)

    url = url_map\
        .replace('{latitude}', str(value.get('latitude', 0)))\
        .replace('{longitude}', str(value.get('longitude', 0)))\
        .replace('{zoom}', str(zoom))

    return url
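

# url of the entity's sitelink for the given wiki id (e.g. 'enwiki'),
# upgraded to https; None when the entity has no such sitelink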
def get_wikilink(result, wikiid):
    url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
    if url is None:
        return url
    elif url.startswith('http://'):
        url = url.replace('http://', 'https://', 1)
    elif url.startswith('//'):
        url = 'https:' + url
    return url
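

# first two-letter language code that has a sitelink of the given kind,
# e.g. wikipatternid 'wiki' matches 'enwiki', 'frwiki', ...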
def get_wiki_firstlanguage(result, wikipatternid):
    for k in result.get('sitelinks', {}).keys():
        if k.endswith(wikipatternid) and len(k) == (2 + len(wikipatternid)):
            return k[0:2]
    return None
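

# label of a property or entity id in the requested language, falling back
# to english; note that this costs one extra api call per label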
def get_label(entity_id, language):
    url = url_entity_label.format(query=urlencode({'ids': entity_id,
                                                   'languages': language + '|en'}))
    response = get(url)
    jsonresponse = json.loads(response.text)
    label = jsonresponse\
        .get('entities', {})\
        .get(entity_id, {})\
        .get('labels', {})\
        .get(language, {})\
        .get('value', None)
    if label is None:
        label = jsonresponse['entities'][entity_id]['labels']['en']['value']
    return label