wikidata.py

# -*- coding: utf-8 -*-
"""
 Wikidata

 @website     https://wikidata.org
 @provide-api yes (https://wikidata.org/w/api.php)
 @using-api   partially (most things require scraping)
 @results     JSON, HTML
 @stable      no (html can change)
 @parse       url, infobox
"""
from searx import logger
from searx.poolrequests import get
from searx.engines.xpath import extract_text
from searx.utils import format_date_by_locale
from searx.engines.wikipedia import supported_languages
from json import loads
from lxml.html import fromstring
from urllib import urlencode

logger = logger.getChild('wikidata')

result_count = 1

# urls
wikidata_host = 'https://www.wikidata.org'
url_search = wikidata_host \
    + '/wiki/Special:ItemDisambiguation?{query}'

wikidata_api = wikidata_host + '/w/api.php'
url_detail = wikidata_api\
    + '?action=parse&format=json&{query}'\
    + '&redirects=1&prop=text%7Cdisplaytitle%7Clanglinks%7Crevid'\
    + '&disableeditsection=1&disabletidy=1&preview=1&sectionpreview=1&disabletoc=1&utf8=1&formatversion=2'

url_map = 'https://www.openstreetmap.org/'\
    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
url_image = 'https://commons.wikimedia.org/wiki/Special:FilePath/{filename}?width=500&height=400'

# xpaths
wikidata_ids_xpath = '//div/ul[@class="wikibase-disambiguation"]/li/a/@title'
title_xpath = '//*[contains(@class,"wikibase-title-label")]'
description_xpath = '//div[contains(@class,"wikibase-entitytermsview-heading-description")]'
property_xpath = '//div[@id="{propertyid}"]'
label_xpath = './/div[contains(@class,"wikibase-statementgroupview-property-label")]/a'
url_xpath = './/a[contains(@class,"external free") or contains(@class, "wb-external-id")]'
wikilink_xpath = './/ul[contains(@class,"wikibase-sitelinklistview-listview")]'\
    + '/li[contains(@data-wb-siteid,"{wikiid}")]//a/@href'
property_row_xpath = './/div[contains(@class,"wikibase-statementview")]'
preferred_rank_xpath = './/span[contains(@class,"wikibase-rankselector-preferred")]'
value_xpath = './/div[contains(@class,"wikibase-statementview-mainsnak")]'\
    + '/*/div[contains(@class,"wikibase-snakview-value")]'
language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator")]'
calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'

def request(query, params):
    language = params['language'].split('_')[0]
    if language == 'all':
        language = 'en'

    params['url'] = url_search.format(
        query=urlencode({'label': query,
                         'language': language}))
    return params
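
# For illustration, a query of 'Paris' with language 'en' produces a search URL
# of roughly this form (parameter order depends on urlencode):
# https://www.wikidata.org/wiki/Special:ItemDisambiguation?label=Paris&language=en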

def response(resp):
    results = []
    html = fromstring(resp.content)
    wikidata_ids = html.xpath(wikidata_ids_xpath)

    language = resp.search_params['language'].split('_')[0]
    if language == 'all':
        language = 'en'

    # TODO: make requests asynchronous to avoid timeout when result_count > 1
    for wikidata_id in wikidata_ids[:result_count]:
        url = url_detail.format(query=urlencode({'page': wikidata_id,
                                                 'uselang': language}))
        htmlresponse = get(url)
        jsonresponse = loads(htmlresponse.content)
        results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])

    return results

def getDetail(jsonresponse, wikidata_id, language, locale):
    results = []
    urls = []
    attributes = []

    title = jsonresponse.get('parse', {}).get('displaytitle', {})
    result = jsonresponse.get('parse', {}).get('text', {})

    if not title or not result:
        return results

    title = fromstring(title)
    for elem in title.xpath(language_fallback_xpath):
        elem.getparent().remove(elem)
    title = extract_text(title.xpath(title_xpath))

    result = fromstring(result)
    for elem in result.xpath(language_fallback_xpath):
        elem.getparent().remove(elem)
    description = extract_text(result.xpath(description_xpath))

    # URLS
    # official website
    add_url(urls, result, 'P856', results=results)

    # wikipedia
    wikipedia_link_count = 0
    wikipedia_link = get_wikilink(result, language + 'wiki')
    if wikipedia_link:
        wikipedia_link_count += 1
        urls.append({'title': 'Wikipedia (' + language + ')',
                     'url': wikipedia_link})

    if language != 'en':
        wikipedia_en_link = get_wikilink(result, 'enwiki')
        if wikipedia_en_link:
            wikipedia_link_count += 1
            urls.append({'title': 'Wikipedia (en)',
                         'url': wikipedia_en_link})

    # TODO: get_wiki_firstlanguage
    # if wikipedia_link_count == 0:

    # more wikis
    add_url(urls, result, default_label='Wikivoyage (' + language + ')', link_type=language + 'wikivoyage')
    add_url(urls, result, default_label='Wikiquote (' + language + ')', link_type=language + 'wikiquote')
    add_url(urls, result, default_label='Wikimedia Commons', link_type='commonswiki')

    add_url(urls, result, 'P625', 'OpenStreetMap', link_type='geo')

    # musicbrainz
    add_url(urls, result, 'P434', 'MusicBrainz', 'http://musicbrainz.org/artist/')
    add_url(urls, result, 'P435', 'MusicBrainz', 'http://musicbrainz.org/work/')
    add_url(urls, result, 'P436', 'MusicBrainz', 'http://musicbrainz.org/release-group/')
    add_url(urls, result, 'P966', 'MusicBrainz', 'http://musicbrainz.org/label/')

    # IMDb
    add_url(urls, result, 'P345', 'IMDb', 'https://www.imdb.com/', link_type='imdb')
    # source code repository
    add_url(urls, result, 'P1324')
    # blog
    add_url(urls, result, 'P1581')

    # social media links
    add_url(urls, result, 'P2397', 'YouTube', 'https://www.youtube.com/channel/')
    add_url(urls, result, 'P1651', 'YouTube', 'https://www.youtube.com/watch?v=')
    add_url(urls, result, 'P2002', 'Twitter', 'https://twitter.com/')
    add_url(urls, result, 'P2013', 'Facebook', 'https://facebook.com/')
    add_url(urls, result, 'P2003', 'Instagram', 'https://instagram.com/')

    urls.append({'title': 'Wikidata',
                 'url': 'https://www.wikidata.org/wiki/'
                 + wikidata_id + '?uselang=' + language})

    # INFOBOX ATTRIBUTES (ROWS)

    # DATES
    # inception date
    add_attribute(attributes, result, 'P571', date=True)
    # dissolution date
    add_attribute(attributes, result, 'P576', date=True)
    # start date
    add_attribute(attributes, result, 'P580', date=True)
    # end date
    add_attribute(attributes, result, 'P582', date=True)
    # date of birth
    add_attribute(attributes, result, 'P569', date=True)
    # date of death
    add_attribute(attributes, result, 'P570', date=True)
    # date of spacecraft launch
    add_attribute(attributes, result, 'P619', date=True)
    # date of spacecraft landing
    add_attribute(attributes, result, 'P620', date=True)

    # nationality
    add_attribute(attributes, result, 'P27')
    # country of origin
    add_attribute(attributes, result, 'P495')
    # country
    add_attribute(attributes, result, 'P17')
    # headquarters
    add_attribute(attributes, result, 'Q180')

    # PLACES
    # capital
    add_attribute(attributes, result, 'P36', trim=True)
    # head of state
    add_attribute(attributes, result, 'P35', trim=True)
    # head of government
    add_attribute(attributes, result, 'P6', trim=True)
    # type of government
    add_attribute(attributes, result, 'P122')
    # official language
    add_attribute(attributes, result, 'P37')
    # population
    add_attribute(attributes, result, 'P1082', trim=True)
    # area
    add_attribute(attributes, result, 'P2046')
    # currency
    add_attribute(attributes, result, 'P38', trim=True)
    # height (building)
    add_attribute(attributes, result, 'P2048')

    # MEDIA
    # platform (videogames)
    add_attribute(attributes, result, 'P400')
    # author
    add_attribute(attributes, result, 'P50')
    # creator
    add_attribute(attributes, result, 'P170')
    # director
    add_attribute(attributes, result, 'P57')
    # performer
    add_attribute(attributes, result, 'P175')
    # developer
    add_attribute(attributes, result, 'P178')
    # producer
    add_attribute(attributes, result, 'P162')
    # manufacturer
    add_attribute(attributes, result, 'P176')
    # screenwriter
    add_attribute(attributes, result, 'P58')
    # production company
    add_attribute(attributes, result, 'P272')
    # record label
    add_attribute(attributes, result, 'P264')
    # publisher
    add_attribute(attributes, result, 'P123')
    # original network
    add_attribute(attributes, result, 'P449')
    # distributor
    add_attribute(attributes, result, 'P750')
    # composer
    add_attribute(attributes, result, 'P86')
    # publication date
    add_attribute(attributes, result, 'P577', date=True)
    # genre
    add_attribute(attributes, result, 'P136')
    # original language
    add_attribute(attributes, result, 'P364')
    # isbn
    add_attribute(attributes, result, 'Q33057')
    # software license
    add_attribute(attributes, result, 'P275')
    # programming language
    add_attribute(attributes, result, 'P277')
    # version
    add_attribute(attributes, result, 'P348', trim=True)
    # narrative location
    add_attribute(attributes, result, 'P840')

    # LANGUAGES
    # number of speakers
    add_attribute(attributes, result, 'P1098')
    # writing system
    add_attribute(attributes, result, 'P282')
    # regulatory body
    add_attribute(attributes, result, 'P1018')
    # language code
    add_attribute(attributes, result, 'P218')

    # OTHER
    # ceo
    add_attribute(attributes, result, 'P169', trim=True)
    # founder
    add_attribute(attributes, result, 'P112')
    # legal form (company/organization)
    add_attribute(attributes, result, 'P1454')
    # operator
    add_attribute(attributes, result, 'P137')
    # crew members
    add_attribute(attributes, result, 'P1029')
    # taxon
    add_attribute(attributes, result, 'P225')
    # chemical formula
    add_attribute(attributes, result, 'P274')
    # winner (sports/contests)
    add_attribute(attributes, result, 'P1346')
    # number of deaths
    add_attribute(attributes, result, 'P1120')
    # currency code
    add_attribute(attributes, result, 'P498')

    image = add_image(result)

    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
        results.append({
                       'url': urls[0]['url'],
                       'title': title,
                       'content': description
                       })
    else:
        results.append({
                       'infobox': title,
                       'id': wikipedia_link,
                       'content': description,
                       'img_src': image,
                       'attributes': attributes,
                       'urls': urls
                       })

    return results
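
# For reference, the infobox entry appended above has the shape
# {'infobox': <title>, 'id': <wikipedia url>, 'content': <description>,
#  'img_src': <image url>, 'attributes': [...], 'urls': [...]}
# (placeholders are illustrative; the keys come from the code above).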

# only returns first match
def add_image(result):
    # P15: route map, P242: locator map, P154: logo, P18: image, P242: map, P41: flag, P2716: collage, P2910: icon
    property_ids = ['P15', 'P242', 'P154', 'P18', 'P242', 'P41', 'P2716', 'P2910']

    for property_id in property_ids:
        image = result.xpath(property_xpath.replace('{propertyid}', property_id))
        if image:
            image_name = image[0].xpath(value_xpath)
            image_src = url_image.replace('{filename}', extract_text(image_name[0]))
            return image_src
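
# Illustrative example (the filename is hypothetical): a file name of "Example.jpg" yields
# https://commons.wikimedia.org/wiki/Special:FilePath/Example.jpg?width=500&height=400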

# setting trim will only return high ranked rows OR the first row
def add_attribute(attributes, result, property_id, default_label=None, date=False, trim=False):
    attribute = result.xpath(property_xpath.replace('{propertyid}', property_id))
    if attribute:

        if default_label:
            label = default_label
        else:
            label = extract_text(attribute[0].xpath(label_xpath))
            label = label[0].upper() + label[1:]

        if date:
            trim = True
            # remove calendar name
            calendar_name = attribute[0].xpath(calendar_name_xpath)
            for calendar in calendar_name:
                calendar.getparent().remove(calendar)

        concat_values = ""
        values = []
        first_value = None
        for row in attribute[0].xpath(property_row_xpath):
            if not first_value or not trim or row.xpath(preferred_rank_xpath):
                value = row.xpath(value_xpath)
                if not value:
                    continue
                value = extract_text(value)

                # save first value in case no ranked row is found
                if trim and not first_value:
                    first_value = value
                else:
                    # to avoid duplicate values
                    if value not in values:
                        concat_values += value + ", "
                        values.append(value)

        if trim and not values:
            attributes.append({'label': label,
                               'value': first_value})
        else:
            attributes.append({'label': label,
                               'value': concat_values[:-2]})
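
# An appended attribute is a simple label/value pair, e.g. (values are
# illustrative): {'label': 'Date of birth', 'value': '1 January 1970'}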

# requires property_id unless it's a wiki link (defined in link_type)
def add_url(urls, result, property_id=None, default_label=None, url_prefix=None, results=None, link_type=None):
    links = []

    # wiki links don't have property in wikidata page
    if link_type and 'wiki' in link_type:
        links.append(get_wikilink(result, link_type))
    else:
        dom_element = result.xpath(property_xpath.replace('{propertyid}', property_id))
        if dom_element:
            dom_element = dom_element[0]
            if not default_label:
                label = extract_text(dom_element.xpath(label_xpath))
                label = label[0].upper() + label[1:]

            if link_type == 'geo':
                links.append(get_geolink(dom_element))

            elif link_type == 'imdb':
                links.append(get_imdblink(dom_element, url_prefix))

            else:
                url_results = dom_element.xpath(url_xpath)
                for link in url_results:
                    if link is not None:
                        if url_prefix:
                            link = url_prefix + extract_text(link)
                        else:
                            link = extract_text(link)
                        links.append(link)

    # append urls
    for url in links:
        if url is not None:
            urls.append({'title': default_label or label,
                         'url': url})
            if results is not None:
                results.append({'title': default_label or label,
                                'url': url})
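
# Each collected link is appended as a {'title': ..., 'url': ...} entry; when
# `results` is passed (as for the official website, P856, above), the same entry
# also becomes a regular search result.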

def get_imdblink(result, url_prefix):
    imdb_id = result.xpath(value_xpath)
    if imdb_id:
        imdb_id = extract_text(imdb_id)
        id_prefix = imdb_id[:2]
        if id_prefix == 'tt':
            url = url_prefix + 'title/' + imdb_id
        elif id_prefix == 'nm':
            url = url_prefix + 'name/' + imdb_id
        elif id_prefix == 'ch':
            url = url_prefix + 'character/' + imdb_id
        elif id_prefix == 'co':
            url = url_prefix + 'company/' + imdb_id
        elif id_prefix == 'ev':
            url = url_prefix + 'event/' + imdb_id
        else:
            url = None
        return url
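
# Illustrative mapping: an id of 'tt0000001' with url_prefix 'https://www.imdb.com/'
# yields 'https://www.imdb.com/title/tt0000001'; ids starting with 'nm' map to
# 'name/', 'ch' to 'character/', 'co' to 'company/' and 'ev' to 'event/'.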

def get_geolink(result):
    coordinates = result.xpath(value_xpath)
    if not coordinates:
        return None
    coordinates = extract_text(coordinates[0])
    latitude, longitude = coordinates.split(',')

    # convert to decimal
    lat = int(latitude[:latitude.find(u'°')])
    if latitude.find('\'') >= 0:
        lat += int(latitude[latitude.find(u'°') + 1:latitude.find('\'')] or 0) / 60.0
    if latitude.find('"') >= 0:
        lat += float(latitude[latitude.find('\'') + 1:latitude.find('"')] or 0) / 3600.0
    if latitude.find('S') >= 0:
        lat *= -1
    lon = int(longitude[:longitude.find(u'°')])
    if longitude.find('\'') >= 0:
        lon += int(longitude[longitude.find(u'°') + 1:longitude.find('\'')] or 0) / 60.0
    if longitude.find('"') >= 0:
        lon += float(longitude[longitude.find('\'') + 1:longitude.find('"')] or 0) / 3600.0
    if longitude.find('W') >= 0:
        lon *= -1
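
    # Worked example with an illustrative input of 48°51'24"N, 2°21'3"E:
    # lat = 48 + 51/60.0 + 24/3600.0 ≈ 48.8567 (northern hemisphere, stays positive)
    # lon = 2 + 21/60.0 + 3/3600.0 ≈ 2.3508 (east of Greenwich, stays positive)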

    # TODO: get precision
    precision = 0.0002
    # there is no zoom information, deduce from precision (error prone)
    # samples :
    # 13 --> 5
    # 1 --> 6
    # 0.016666666666667 --> 9
    # 0.00027777777777778 --> 19
    # wolframalpha :
    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
    # 14.1186-8.8322 x+0.625447 x^2
    if precision < 0.0003:
        zoom = 19
    else:
        zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)

    url = url_map\
        .replace('{latitude}', str(lat))\
        .replace('{longitude}', str(lon))\
        .replace('{zoom}', str(zoom))

    return url

def get_wikilink(result, wikiid):
    url = result.xpath(wikilink_xpath.replace('{wikiid}', wikiid))
    if not url:
        return None
    url = url[0]
    if url.startswith('http://'):
        url = url.replace('http://', 'https://')
    elif url.startswith('//'):
        url = 'https:' + url

    return url
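
# Sitelink hrefs can be protocol-relative; the normalization above turns, for
# example, '//en.wikipedia.org/wiki/Example' into 'https://en.wikipedia.org/wiki/Example'.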