"""
 Twitter (Social media)

 @website     https://twitter.com/
 @provide-api yes (https://dev.twitter.com/docs/using-search)

 @using-api   no
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content, publishedDate
"""

from urlparse import urljoin
from urllib import urlencode
from lxml import html
from datetime import datetime
from searx.engines.xpath import extract_text

# engine dependent config
categories = ['social media']
language_support = True

# search-url
base_url = 'https://twitter.com/'
search_url = base_url + 'search?'

# specific xpath variables
results_xpath = '//li[@data-item-type="tweet"]'
link_xpath = './/small[@class="time"]//a'
title_xpath = './/span[contains(@class, "username")]'
content_xpath = './/p[contains(@class, "tweet-text")]'
timestamp_xpath = './/span[contains(@class,"_timestamp")]'


# do search-request
def request(query, params):
    params['url'] = search_url + urlencode({'q': query})

    # set language if specified
    if params['language'] != 'all':
        params['cookies']['lang'] = params['language'].split('_')[0]
    else:
        params['cookies']['lang'] = 'en'

    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for tweet in dom.xpath(results_xpath):
        try:
            link = tweet.xpath(link_xpath)[0]
            content = extract_text(tweet.xpath(content_xpath)[0])
        except Exception:
            continue

        url = urljoin(base_url, link.attrib.get('href'))
        title = extract_text(tweet.xpath(title_xpath))

        # tweets carry their timestamp in a dedicated span; use it when present
        pubdate = tweet.xpath(timestamp_xpath)
        if len(pubdate) > 0:
            timestamp = float(pubdate[0].attrib.get('data-time'))
            publishedDate = datetime.fromtimestamp(timestamp, None)

            # append result with publication date
            results.append({'url': url,
                            'title': title,
                            'content': content,
                            'publishedDate': publishedDate})
        else:
            # append result without publication date
            results.append({'url': url,
                            'title': title,
                            'content': content})

    # return results
    return results
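

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the searx engine interface): a minimal
# manual check of request() and response() above. It assumes this module's
# dependencies (urlparse/urllib from Python 2, lxml, searx.engines.xpath) are
# importable; FakeResponse and its markup are hypothetical, written only to
# match the xpath variables defined in this file.
if __name__ == '__main__':
    # build the outgoing search request the way searx would
    params = request('searx', {'language': 'en_US', 'cookies': {}})
    print(params['url'])       # https://twitter.com/search?q=searx
    print(params['cookies'])   # {'lang': 'en'}

    # run a hand-written tweet snippet through the response parser
    class FakeResponse(object):
        text = u'''
        <ul>
          <li data-item-type="tweet">
            <small class="time"><a href="/someuser/status/1">1h</a></small>
            <span class="username">@someuser</span>
            <p class="tweet-text">hello from a fake tweet</p>
            <span class="_timestamp" data-time="1400000000"></span>
          </li>
        </ul>'''

    for result in response(FakeResponse()):
        print(result)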
 |