Use MediaWiki API for search
parent 4e25fa7bf9
commit 13ca93a4e5
@@ -12,10 +12,12 @@ author: mutantmonkey <mutantmonkey@gmail.com>
 
 import re, urllib
 import web
+import json
 
+wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
 wikiuri = 'https://wiki.archlinux.org/index.php/%s'
 wikisearch = 'https://wiki.archlinux.org/index.php/Special:Search?' \
     + 'search=%s&fulltext=Search'
 
 r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
 r_content = re.compile(r'(?ims)</p>\n</div>.*?<!-- end content -->')
@@ -23,153 +25,63 @@ r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
 r_tag = re.compile(r'<(?!!)[^>]+>')
 r_whitespace = re.compile(r'[\t\r\n ]+')
 r_redirect = re.compile(
     r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)'
 )
 
 abbrs = ['etc', 'ca', 'cf', 'Co', 'Ltd', 'Inc', 'Mt', 'Mr', 'Mrs',
     'Dr', 'Ms', 'Rev', 'Fr', 'St', 'Sgt', 'pron', 'approx', 'lit',
     'syn', 'transl', 'sess', 'fl', 'Op'] \
     + list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') \
     + list('abcdefghijklmnopqrstuvwxyz')
 t_sentence = r'^.{5,}?(?<!\b%s)(?:\.(?=[\[ ][A-Z0-9]|\Z)|\Z)'
 r_sentence = re.compile(t_sentence % r')(?<!\b'.join(abbrs))
 
 def unescape(s):
     s = s.replace('&gt;', '>')
     s = s.replace('&lt;', '<')
     s = s.replace('&amp;', '&')
     s = s.replace('&#160;', ' ')
     return s
 
 def text(html):
     html = r_tag.sub('', html)
     html = r_whitespace.sub(' ', html)
     return unescape(html).strip()
 
-def search(term):
-    try: import search
-    except ImportError, e:
-        print e
-        return term
-
-    if isinstance(term, unicode):
-        term = term.encode('utf-8')
-    else: term = term.decode('utf-8')
-
-    term = term.replace('_', ' ')
-    try: uri = search.result('site:wiki.archlinux.org %s' % term)
-    except IndexError: return term
-    if uri:
-        return uri[len('https://wiki.archlinux.org/index.php/'):]
-    else: return term
-
 def archwiki(term, last=False):
-    global wikiuri
-    if not '%' in term:
-        if isinstance(term, unicode):
-            t = term.encode('utf-8')
-        else: t = term
-        q = urllib.quote(t)
-        u = wikiuri % q
-        bytes = web.get(u)
-    else: bytes = web.get(wikiuri % term)
-    bytes = r_tr.sub('', bytes)
-
-    if not last:
-        r = r_redirect.search(bytes[:4096])
-        if r:
-            term = urllib.unquote(r.group(1))
-            return archwiki(term, last=True)
-
-    # kind of hacky fix to deal with Arch wiki template, should be cleaned up a bit
-    content = r_content.findall(bytes)
-    if not content or len(content) < 1:
-        return None
-    paragraphs = r_paragraph.findall(content[0])
-    print paragraphs
-
-    if not paragraphs:
-        if not last:
-            term = search(term)
-            return archwiki(term, last=True)
-        return None
-
-    # Pre-process
-    paragraphs = [para for para in paragraphs
-        if (para and 'technical limitations' not in para
-            and 'window.showTocToggle' not in para
-            and 'Deletion_policy' not in para
-            and 'Template:AfD_footer' not in para
-            and not (para.startswith('<p><i>') and
-                para.endswith('</i></p>'))
-            and not 'disambiguation)"' in para)
-            and not '(images and media)' in para
-            and not 'This article contains a' in para
-            and not 'id="coordinates"' in para
-            and not 'class="thumb' in para
-            and not 'There is currently no text in this page.' in para]
-    # and not 'style="display:none"' in para]
-
-    for i, para in enumerate(paragraphs):
-        para = para.replace('<sup>', '|')
-        para = para.replace('</sup>', '|')
-        paragraphs[i] = text(para).strip()
-
-    # Post-process
-    paragraphs = [para for para in paragraphs if
-        (para and not (para.endswith(':') and len(para) < 150))]
-
-    para = text(paragraphs[0])
-    m = r_sentence.match(para)
-
-    if not m:
-        if not last:
-            term = search(term)
-            return archwiki(term, last=True)
-        return None
-    sentence = m.group(0)
-
-    maxlength = 275
-    if len(sentence) > maxlength:
-        sentence = sentence[:maxlength]
-        words = sentence[:-5].split(' ')
-        words.pop()
-        sentence = ' '.join(words) + ' [...]'
-
-    if (('using the Article Wizard if you wish' in sentence)
-        or ('or add a request for it' in sentence)):
-        if not last:
-            term = search(term)
-            return archwiki(term, last=True)
-        return None
-
-    sentence = '"' + sentence.replace('"', "'") + '"'
-    sentence = sentence.decode('utf-8').encode('utf-8')
-    wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-    term = term.decode('utf-8').encode('utf-8')
-    return sentence + ' - ' + (wikiuri % term)
+    global wikiapi, wikiuri
+    url = wikiapi % term
+    bytes = web.get(url)
+    result = json.loads(bytes)
+    result = result['query']['search']
+    if len(result) <= 0:
+        return None
+    term = result[0]['title']
+    term = term.replace(' ', '_')
+    snippet = text(result[0]['snippet'])
+    return "%s - %s" % (snippet, wikiuri % term)
 
 def awik(phenny, input):
     origterm = input.groups()[1]
     if not origterm:
         return phenny.say('Perhaps you meant ".awik dwm"?')
     origterm = origterm.encode('utf-8')
 
     term = urllib.unquote(origterm)
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 
     try: result = archwiki(term)
     except IOError:
         error = "Can't connect to wiki.archlinux.org (%s)" % (wikiuri % term)
         return phenny.say(error)
 
     if result is not None:
         phenny.say(result)
     else: phenny.say('Can\'t find anything in the ArchWiki for "%s".' % origterm)
 
 awik.commands = ['awik']
 awik.priority = 'high'
 
 if __name__ == '__main__':
     print __doc__.strip()
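For reference, the new lookup path in isolation: the module now makes one request to the MediaWiki search API and formats the first hit's title and snippet, instead of fetching and scraping article HTML. A minimal standalone sketch of that flow, under two assumptions: the Python 2 stdlib urllib2 stands in for phenny's web.get helper, and the term is percent-encoded here for safety (the patched module interpolates it directly):

import json
import urllib
import urllib2

wikiapi = ('https://wiki.archlinux.org/api.php?action=query&list=search'
           '&srsearch=%s&limit=1&prop=snippet&format=json')
wikiuri = 'https://wiki.archlinux.org/index.php/%s'

def archwiki_lookup(term):
    # One API round trip replaces the old fetch-and-scrape cycle.
    url = wikiapi % urllib.quote(term)
    result = json.loads(urllib2.urlopen(url).read())
    hits = result['query']['search']
    if not hits:
        return None  # mirrors the patched module's "no result" case
    title = hits[0]['title'].replace(' ', '_')
    # hits[0]['snippet'] is an HTML fragment; the module runs it
    # through text() to strip tags before sending it to IRC.
    return '%s - %s' % (hits[0]['snippet'], wikiuri % title)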
@@ -12,158 +12,75 @@ author: mutantmonkey <mutantmonkey@gmail.com>
 
 import re, urllib
 import web
+import json
 
+wikiapi = 'https://vtluug.org/w/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
 wikiuri = 'https://vtluug.org/wiki/%s'
 wikisearch = 'https://vtluug.org/wiki/Special:Search?' \
     + 'search=%s&fulltext=Search'
 
 r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
 r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
 r_tag = re.compile(r'<(?!!)[^>]+>')
 r_whitespace = re.compile(r'[\t\r\n ]+')
 r_redirect = re.compile(
     r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)'
 )
 
 abbrs = ['etc', 'ca', 'cf', 'Co', 'Ltd', 'Inc', 'Mt', 'Mr', 'Mrs',
     'Dr', 'Ms', 'Rev', 'Fr', 'St', 'Sgt', 'pron', 'approx', 'lit',
     'syn', 'transl', 'sess', 'fl', 'Op'] \
     + list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') \
     + list('abcdefghijklmnopqrstuvwxyz')
 t_sentence = r'^.{5,}?(?<!\b%s)(?:\.(?=[\[ ][A-Z0-9]|\Z)|\Z)'
 r_sentence = re.compile(t_sentence % r')(?<!\b'.join(abbrs))
 
 def unescape(s):
     s = s.replace('&gt;', '>')
     s = s.replace('&lt;', '<')
     s = s.replace('&amp;', '&')
     s = s.replace('&#160;', ' ')
     return s
 
 def text(html):
     html = r_tag.sub('', html)
     html = r_whitespace.sub(' ', html)
     return unescape(html).strip()
 
-def search(term):
-    try: import search
-    except ImportError, e:
-        print e
-        return term
-
-    if isinstance(term, unicode):
-        term = term.encode('utf-8')
-    else: term = term.decode('utf-8')
-
-    term = term.replace('_', ' ')
-    try: uri = search.result('site:vtluug.org %s' % term)
-    except IndexError: return term
-    if uri:
-        return uri[len('http://vtluug.org/wiki/'):]
-    else: return term
-
 def vtluugwiki(term, last=False):
-    global wikiuri
-    if not '%' in term:
-        if isinstance(term, unicode):
-            t = term.encode('utf-8')
-        else: t = term
-        q = urllib.quote(t)
-        u = wikiuri % q
-        bytes = web.get(u)
-    else: bytes = web.get(wikiuri % term)
-    bytes = r_tr.sub('', bytes)
-
-    if not last:
-        r = r_redirect.search(bytes[:4096])
-        if r:
-            term = urllib.unquote(r.group(1))
-            return vtluugwiki(term, last=True)
-
-    paragraphs = r_paragraph.findall(bytes)
-
-    if not paragraphs:
-        if not last:
-            term = search(term)
-            return vtluugwiki(term, last=True)
-        return None
-
-    # Pre-process
-    paragraphs = [para for para in paragraphs
-        if (para and 'technical limitations' not in para
-            and 'window.showTocToggle' not in para
-            and 'Deletion_policy' not in para
-            and 'Template:AfD_footer' not in para
-            and not (para.startswith('<p><i>') and
-                para.endswith('</i></p>'))
-            and not 'disambiguation)"' in para)
-            and not '(images and media)' in para
-            and not 'This article contains a' in para
-            and not 'id="coordinates"' in para
-            and not 'class="thumb' in para
-            and not 'There is currently no text in this page.' in para]
-    # and not 'style="display:none"' in para]
-
-    for i, para in enumerate(paragraphs):
-        para = para.replace('<sup>', '|')
-        para = para.replace('</sup>', '|')
-        paragraphs[i] = text(para).strip()
-
-    # Post-process
-    paragraphs = [para for para in paragraphs if
-        (para and not (para.endswith(':') and len(para) < 150))]
-
-    para = text(paragraphs[0])
-    m = r_sentence.match(para)
-
-    if not m:
-        if not last:
-            term = search(term)
-            return vtluugwiki(term, last=True)
-        return None
-    sentence = m.group(0)
-
-    maxlength = 275
-    if len(sentence) > maxlength:
-        sentence = sentence[:maxlength]
-        words = sentence[:-5].split(' ')
-        words.pop()
-        sentence = ' '.join(words) + ' [...]'
-
-    if (('using the Article Wizard if you wish' in sentence)
-        or ('or add a request for it' in sentence)):
-        if not last:
-            term = search(term)
-            return vtluugwiki(term, last=True)
-        return None
-
-    sentence = '"' + sentence.replace('"', "'") + '"'
-    sentence = sentence.decode('utf-8').encode('utf-8')
-    wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-    term = term.decode('utf-8').encode('utf-8')
-    return sentence + ' - ' + (wikiuri % term)
+    global wikiapi, wikiuri
+    url = wikiapi % term
+    bytes = web.get(url)
+    result = json.loads(bytes)
+    result = result['query']['search']
+    if len(result) <= 0:
+        return None
+    term = result[0]['title']
+    term = term.replace(' ', '_')
+    snippet = text(result[0]['snippet'])
+    return "%s - %s" % (snippet, wikiuri % term)
 
 def vtluug(phenny, input):
    origterm = input.groups()[1]
    if not origterm:
        return phenny.say('Perhaps you meant ".vtluug Zen"?')
    origterm = origterm.encode('utf-8')
 
    term = urllib.unquote(origterm)
    term = term[0].upper() + term[1:]
    term = term.replace(' ', '_')
 
    try: result = vtluugwiki(term)
    except IOError:
        error = "Can't connect to vtluug.org (%s)" % (wikiuri % term)
        return phenny.say(error)
 
    if result is not None:
        phenny.say(result)
    else: phenny.say('Can\'t find anything in the VTLUUG Wiki for "%s".' % origterm)
 
 vtluug.commands = ['vtluug']
 vtluug.priority = 'high'
 
 if __name__ == '__main__':
     print __doc__.strip()
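The VTLUUG module gets the identical treatment. The rewritten vtluugwiki() indexes into result['query']['search'] and treats an empty list as a miss; an abridged, illustrative shape of the JSON it parses (field names are what the code reads, the sample values here are invented):

response = {
    "query": {
        "search": [
            {"title": "Dwm",
             "snippet": '<span class="searchmatch">dwm</span> is a dynamic window manager ...'}
        ]
    }
}

hits = response['query']['search']
print hits[0]['title']  # -> Dwm
# A query with no matches comes back with "search": [], which is why
# vtluugwiki() checks len(result) <= 0 and returns None.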
@@ -9,165 +9,75 @@ http://inamidst.com/phenny/
 
 import re, urllib
 import web
+import json
 
-wikiuri = 'http://%s.wikipedia.org/wiki/%s'
-# wikisearch = 'http://%s.wikipedia.org/wiki/Special:Search?' \
-#    + 'search=%s&fulltext=Search'
+wikiapi = 'http://en.wikipedia.org/w/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
+wikiuri = 'http://en.wikipedia.org/wiki/%s'
+wikisearch = 'http://en.wikipedia.org/wiki/Special:Search?' \
+    + 'search=%s&fulltext=Search'
 
 r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
 r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
 r_tag = re.compile(r'<(?!!)[^>]+>')
 r_whitespace = re.compile(r'[\t\r\n ]+')
 r_redirect = re.compile(
     r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)'
 )
 
 abbrs = ['etc', 'ca', 'cf', 'Co', 'Ltd', 'Inc', 'Mt', 'Mr', 'Mrs',
     'Dr', 'Ms', 'Rev', 'Fr', 'St', 'Sgt', 'pron', 'approx', 'lit',
     'syn', 'transl', 'sess', 'fl', 'Op', 'Dec'] \
     + list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') \
     + list('abcdefghijklmnopqrstuvwxyz')
 t_sentence = r'^.{5,}?(?<!\b%s)(?:\.(?=[\[ ][A-Z0-9]|\Z)|\Z)'
 r_sentence = re.compile(t_sentence % r')(?<!\b'.join(abbrs))
 
 def unescape(s):
     s = s.replace('&gt;', '>')
     s = s.replace('&lt;', '<')
     s = s.replace('&amp;', '&')
     s = s.replace('&#160;', ' ')
     return s
 
 def text(html):
     html = r_tag.sub('', html)
     html = r_whitespace.sub(' ', html)
     return unescape(html).strip()
 
-def search(term):
-    try: import search
-    except ImportError, e:
-        print e
-        return term
-
-    if isinstance(term, unicode):
-        term = term.encode('utf-8')
-    else: term = term.decode('utf-8')
-
-    term = term.replace('_', ' ')
-    try: uri = search.result('site:en.wikipedia.org %s' % term)
-    except IndexError: return term
-    if uri:
-        return uri[len('http://en.wikipedia.org/wiki/'):]
-    else: return term
-
-def wikipedia(term, language='en', last=False):
-    global wikiuri
-    if not '%' in term:
-        if isinstance(term, unicode):
-            t = term.encode('utf-8')
-        else: t = term
-        q = urllib.quote(t)
-        u = wikiuri % (language, q)
-        bytes = web.get(u)
-    else: bytes = web.get(wikiuri % (language, term))
-    bytes = r_tr.sub('', bytes)
-
-    if not last:
-        r = r_redirect.search(bytes[:4096])
-        if r:
-            term = urllib.unquote(r.group(1))
-            return wikipedia(term, language=language, last=True)
-
-    paragraphs = r_paragraph.findall(bytes)
-
-    if not paragraphs:
-        if not last:
-            term = search(term)
-            return wikipedia(term, language=language, last=True)
-        return None
-
-    # Pre-process
-    paragraphs = [para for para in paragraphs
-        if (para and 'technical limitations' not in para
-            and 'window.showTocToggle' not in para
-            and 'Deletion_policy' not in para
-            and 'Template:AfD_footer' not in para
-            and not (para.startswith('<p><i>') and
-                para.endswith('</i></p>'))
-            and not 'disambiguation)"' in para)
-            and not '(images and media)' in para
-            and not 'This article contains a' in para
-            and not 'id="coordinates"' in para
-            and not 'class="thumb' in para]
-    # and not 'style="display:none"' in para]
-
-    for i, para in enumerate(paragraphs):
-        para = para.replace('<sup>', '|')
-        para = para.replace('</sup>', '|')
-        paragraphs[i] = text(para).strip()
-
-    # Post-process
-    paragraphs = [para for para in paragraphs if
-        (para and not (para.endswith(':') and len(para) < 150))]
-
-    para = text(paragraphs[0])
-    m = r_sentence.match(para)
-
-    if not m:
-        if not last:
-            term = search(term)
-            return wikipedia(term, language=language, last=True)
-        return None
-    sentence = m.group(0)
-
-    maxlength = 275
-    if len(sentence) > maxlength:
-        sentence = sentence[:maxlength]
-        words = sentence[:-5].split(' ')
-        words.pop()
-        sentence = ' '.join(words) + ' [...]'
-
-    if (('using the Article Wizard if you wish' in sentence)
-        or ('or add a request for it' in sentence)
-        or ('in existing articles' in sentence)):
-        if not last:
-            term = search(term)
-            return wikipedia(term, language=language, last=True)
-        return None
-
-    sentence = '"' + sentence.replace('"', "'") + '"'
-    sentence = sentence.decode('utf-8').encode('utf-8')
-    wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-    term = term.decode('utf-8').encode('utf-8')
-    return sentence + ' - ' + (wikiuri % (language, term))
+def wikipedia(term, last=False):
+    global wikiapi, wikiuri
+    url = wikiapi % term
+    bytes = web.get(url)
+    result = json.loads(bytes)
+    result = result['query']['search']
+    if len(result) <= 0:
+        return None
+    term = result[0]['title']
+    term = term.replace(' ', '_')
+    snippet = text(result[0]['snippet'])
+    return "%s - %s" % (snippet, wikiuri % term)
 
 def wik(phenny, input):
     origterm = input.groups()[1]
     if not origterm:
         return phenny.say('Perhaps you meant ".wik Zen"?')
     origterm = origterm.encode('utf-8')
 
     term = urllib.unquote(origterm)
-    language = 'en'
-    if term.startswith(':') and (' ' in term):
-        a, b = term.split(' ', 1)
-        a = a.lstrip(':')
-        if a.isalpha():
-            language, term = a, b
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 
-    try: result = wikipedia(term, language)
+    try: result = wikipedia(term)
     except IOError:
-        args = (language, wikiuri % (language, term))
-        error = "Can't connect to %s.wikipedia.org (%s)" % args
+        error = "Can't connect to en.wikipedia.org (%s)" % (wikiuri % term)
         return phenny.say(error)
 
     if result is not None:
         phenny.say(result)
     else: phenny.say('Can\'t find anything in Wikipedia for "%s".' % origterm)
 
 wik.commands = ['wik']
 wik.priority = 'high'
 
 if __name__ == '__main__':
     print __doc__.strip()
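In all three modules the API snippet arrives as an HTML fragment, so the untouched text()/unescape() helpers still do the cleanup before anything is said on IRC. A self-contained rerun of that existing pipeline on a made-up snippet (the helpers below are copied verbatim from the modules; only the sample input is invented):

import re

r_tag = re.compile(r'<(?!!)[^>]+>')
r_whitespace = re.compile(r'[\t\r\n ]+')

def unescape(s):
    s = s.replace('&gt;', '>')
    s = s.replace('&lt;', '<')
    s = s.replace('&amp;', '&')
    s = s.replace('&#160;', ' ')
    return s

def text(html):
    html = r_tag.sub('', html)          # drop tags, including the
                                        # <span class="searchmatch"> markers
    html = r_whitespace.sub(' ', html)  # collapse whitespace runs
    return unescape(html).strip()

print text('<span class="searchmatch">Zen</span> is a school of&#160;Mahayana Buddhism')
# -> Zen is a school of Mahayana Buddhism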