Use MediaWiki API for search

master
mutantmonkey 2011-07-15 13:43:27 -04:00
parent 4e25fa7bf9
commit 13ca93a4e5
3 changed files with 129 additions and 390 deletions

modules/archwiki.py

@@ -12,7 +12,9 @@ author: mutantmonkey <mutantmonkey@gmail.com>
 import re, urllib
 import web
+import json
+wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
 wikiuri = 'https://wiki.archlinux.org/index.php/%s'
 wikisearch = 'https://wiki.archlinux.org/index.php/Special:Search?' \
    + 'search=%s&fulltext=Search'
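For reference, the list=search endpoint used here returns JSON shaped like
{"query": {"search": [{"title": ..., "snippet": ...}, ...]}}. A minimal
standalone sketch of the call the new code makes (Python 2, to match this
codebase; api_search is an illustrative name, not part of the diff):

    import json
    import urllib

    wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'

    def api_search(term):
        # Percent-escape the term so spaces, '&', and '%' survive the query string.
        body = urllib.urlopen(wikiapi % urllib.quote(term)).read()
        # Each hit carries at least 'title' and, with prop=snippet, 'snippet'.
        return json.loads(body)['query']['search']

    print api_search('pacman')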
@@ -46,108 +48,18 @@ def text(html):
    html = r_whitespace.sub(' ', html)
    return unescape(html).strip()
-def search(term):
-   try: import search
-   except ImportError, e:
-      print e
-      return term
-   if isinstance(term, unicode):
-      term = term.encode('utf-8')
-   else: term = term.decode('utf-8')
-   term = term.replace('_', ' ')
-   try: uri = search.result('site:wiki.archlinux.org %s' % term)
-   except IndexError: return term
-   if uri:
-      return uri[len('https://wiki.archlinux.org/index.php/'):]
-   else: return term
 def archwiki(term, last=False):
-   global wikiuri
-   if not '%' in term:
-      if isinstance(term, unicode):
-         t = term.encode('utf-8')
-      else: t = term
-      q = urllib.quote(t)
-      u = wikiuri % q
-      bytes = web.get(u)
-   else: bytes = web.get(wikiuri % term)
-   bytes = r_tr.sub('', bytes)
-   if not last:
-      r = r_redirect.search(bytes[:4096])
-      if r:
-         term = urllib.unquote(r.group(1))
-         return archwiki(term, last=True)
-   # kind of hacky fix to deal with Arch wiki template, should be cleaned up a bit
-   content = r_content.findall(bytes)
-   if not content or len(content) < 1:
+   global wikiapi, wikiuri
+   url = wikiapi % term
+   bytes = web.get(url)
+   result = json.loads(bytes)
+   result = result['query']['search']
+   if len(result) <= 0:
       return None
-   paragraphs = r_paragraph.findall(content[0])
-   print paragraphs
-   if not paragraphs:
-      if not last:
-         term = search(term)
-         return archwiki(term, last=True)
-      return None
-   # Pre-process
-   paragraphs = [para for para in paragraphs
-      if (para and 'technical limitations' not in para
-          and 'window.showTocToggle' not in para
-          and 'Deletion_policy' not in para
-          and 'Template:AfD_footer' not in para
-          and not (para.startswith('<p><i>') and
-                   para.endswith('</i></p>'))
-          and not 'disambiguation)"' in para
-          and not '(images and media)' in para
-          and not 'This article contains a' in para
-          and not 'id="coordinates"' in para
-          and not 'class="thumb' in para
-          and not 'There is currently no text in this page.' in para)]
-#         and not 'style="display:none"' in para]
-   for i, para in enumerate(paragraphs):
-      para = para.replace('<sup>', '|')
-      para = para.replace('</sup>', '|')
-      paragraphs[i] = text(para).strip()
-   # Post-process
-   paragraphs = [para for para in paragraphs if
-      (para and not (para.endswith(':') and len(para) < 150))]
-   para = text(paragraphs[0])
-   m = r_sentence.match(para)
-   if not m:
-      if not last:
-         term = search(term)
-         return archwiki(term, last=True)
-      return None
-   sentence = m.group(0)
-   maxlength = 275
-   if len(sentence) > maxlength:
-      sentence = sentence[:maxlength]
-      words = sentence[:-5].split(' ')
-      words.pop()
-      sentence = ' '.join(words) + ' [...]'
-   if (('using the Article Wizard if you wish' in sentence)
-       or ('or add a request for it' in sentence)):
-      if not last:
-         term = search(term)
-         return archwiki(term, last=True)
-      return None
-   sentence = '"' + sentence.replace('"', "'") + '"'
-   sentence = sentence.decode('utf-8').encode('utf-8')
-   wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-   term = term.decode('utf-8').encode('utf-8')
-   return sentence + ' - ' + (wikiuri % term)
+   term = result[0]['title']
+   term = term.replace(' ', '_')
+   snippet = text(result[0]['snippet'])
+   return "%s - %s" % (snippet, wikiuri % term)
 def awik(phenny, input):
    origterm = input.groups()[1]
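Note that the API's snippet field arrives with highlight markup (MediaWiki
wraps matched words in <span class="searchmatch">...</span>), which is why
archwiki() now passes it through the module's text() helper before building
the reply. A rough standalone equivalent of that cleanup, for illustration
only (the real helper also unescapes HTML entities):

    import re

    def strip_snippet(html):
        html = re.sub(r'<[^>]+>', '', html)       # drop tags
        return re.sub(r'\s+', ' ', html).strip()  # collapse whitespace

    print strip_snippet('the <span class="searchmatch">pacman</span> tool')
    # prints: the pacman tool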

modules/vtluug.py

@@ -12,7 +12,9 @@ author: mutantmonkey <mutantmonkey@gmail.com>
 import re, urllib
 import web
+import json
+wikiapi = 'https://vtluug.org/w/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
 wikiuri = 'https://vtluug.org/wiki/%s'
 wikisearch = 'https://vtluug.org/wiki/Special:Search?' \
    + 'search=%s&fulltext=Search'
@@ -45,103 +47,18 @@ def text(html):
    html = r_whitespace.sub(' ', html)
    return unescape(html).strip()
-def search(term):
-   try: import search
-   except ImportError, e:
-      print e
-      return term
-   if isinstance(term, unicode):
-      term = term.encode('utf-8')
-   else: term = term.decode('utf-8')
-   term = term.replace('_', ' ')
-   try: uri = search.result('site:vtluug.org %s' % term)
-   except IndexError: return term
-   if uri:
-      return uri[len('http://vtluug.org/wiki/'):]
-   else: return term
 def vtluugwiki(term, last=False):
-   global wikiuri
-   if not '%' in term:
-      if isinstance(term, unicode):
-         t = term.encode('utf-8')
-      else: t = term
-      q = urllib.quote(t)
-      u = wikiuri % q
-      bytes = web.get(u)
-   else: bytes = web.get(wikiuri % term)
-   bytes = r_tr.sub('', bytes)
-   if not last:
-      r = r_redirect.search(bytes[:4096])
-      if r:
-         term = urllib.unquote(r.group(1))
-         return vtluugwiki(term, last=True)
-   paragraphs = r_paragraph.findall(bytes)
-   if not paragraphs:
-      if not last:
-         term = search(term)
-         return vtluugwiki(term, last=True)
+   global wikiapi, wikiuri
+   url = wikiapi % term
+   bytes = web.get(url)
+   result = json.loads(bytes)
+   result = result['query']['search']
+   if len(result) <= 0:
       return None
-   # Pre-process
-   paragraphs = [para for para in paragraphs
-      if (para and 'technical limitations' not in para
-          and 'window.showTocToggle' not in para
-          and 'Deletion_policy' not in para
-          and 'Template:AfD_footer' not in para
-          and not (para.startswith('<p><i>') and
-                   para.endswith('</i></p>'))
-          and not 'disambiguation)"' in para
-          and not '(images and media)' in para
-          and not 'This article contains a' in para
-          and not 'id="coordinates"' in para
-          and not 'class="thumb' in para
-          and not 'There is currently no text in this page.' in para)]
-#         and not 'style="display:none"' in para]
-   for i, para in enumerate(paragraphs):
-      para = para.replace('<sup>', '|')
-      para = para.replace('</sup>', '|')
-      paragraphs[i] = text(para).strip()
-   # Post-process
-   paragraphs = [para for para in paragraphs if
-      (para and not (para.endswith(':') and len(para) < 150))]
-   para = text(paragraphs[0])
-   m = r_sentence.match(para)
-   if not m:
-      if not last:
-         term = search(term)
-         return vtluugwiki(term, last=True)
-      return None
-   sentence = m.group(0)
-   maxlength = 275
-   if len(sentence) > maxlength:
-      sentence = sentence[:maxlength]
-      words = sentence[:-5].split(' ')
-      words.pop()
-      sentence = ' '.join(words) + ' [...]'
-   if (('using the Article Wizard if you wish' in sentence)
-       or ('or add a request for it' in sentence)):
-      if not last:
-         term = search(term)
-         return vtluugwiki(term, last=True)
-      return None
-   sentence = '"' + sentence.replace('"', "'") + '"'
-   sentence = sentence.decode('utf-8').encode('utf-8')
-   wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-   term = term.decode('utf-8').encode('utf-8')
-   return sentence + ' - ' + (wikiuri % term)
+   term = result[0]['title']
+   term = term.replace(' ', '_')
+   snippet = text(result[0]['snippet'])
+   return "%s - %s" % (snippet, wikiuri % term)
 def vtluug(phenny, input):
    origterm = input.groups()[1]
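One guard the new path drops in all three modules: the old scraping code ran
the term through urllib.quote() before building a URL, while the new code
interpolates it raw (url = wikiapi % term), so a term containing spaces, '&',
or '%' lands unescaped in the query string. A hedged sketch of keeping that
guard, reusing the removed branch's logic (build_url is an illustrative name):

    import urllib

    def build_url(wikiapi, term):
        # Python 2: encode unicode to UTF-8 before quote(), as the removed
        # scraping branch did, then percent-escape the term.
        if isinstance(term, unicode):
            term = term.encode('utf-8')
        return wikiapi % urllib.quote(term)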

modules/wik.py

@@ -9,10 +9,12 @@ http://inamidst.com/phenny/
 import re, urllib
 import web
+import json
-wikiuri = 'http://%s.wikipedia.org/wiki/%s'
-# wikisearch = 'http://%s.wikipedia.org/wiki/Special:Search?' \
-#    + 'search=%s&fulltext=Search'
+wikiapi = 'http://en.wikipedia.org/w/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
+wikiuri = 'http://en.wikipedia.org/wiki/%s'
+wikisearch = 'http://en.wikipedia.org/wiki/Special:Search?' \
+   + 'search=%s&fulltext=Search'
 r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
 r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
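The old wikiuri took two placeholders (language, term); the new one hardcodes
en, which is why the ':de'-style language prefix handling disappears from
wik() below. If per-language lookup were wanted back, both URLs could be keyed
on the subdomain, e.g. (hypothetical sketch, not part of this commit):

    # '%%s' survives the first interpolation as a literal '%s' for the term.
    wikiapi_t = 'http://%s.wikipedia.org/w/api.php?action=query&list=search&srsearch=%%s&limit=1&prop=snippet&format=json'
    wikiuri_t = 'http://%s.wikipedia.org/wiki/%%s'

    def urls(language='en'):
        return wikiapi_t % language, wikiuri_t % language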
@@ -42,103 +44,18 @@ def text(html):
    html = r_whitespace.sub(' ', html)
    return unescape(html).strip()
-def search(term):
-   try: import search
-   except ImportError, e:
-      print e
-      return term
-   if isinstance(term, unicode):
-      term = term.encode('utf-8')
-   else: term = term.decode('utf-8')
-   term = term.replace('_', ' ')
-   try: uri = search.result('site:en.wikipedia.org %s' % term)
-   except IndexError: return term
-   if uri:
-      return uri[len('http://en.wikipedia.org/wiki/'):]
-   else: return term
-def wikipedia(term, language='en', last=False):
-   global wikiuri
-   if not '%' in term:
-      if isinstance(term, unicode):
-         t = term.encode('utf-8')
-      else: t = term
-      q = urllib.quote(t)
-      u = wikiuri % (language, q)
-      bytes = web.get(u)
-   else: bytes = web.get(wikiuri % (language, term))
-   bytes = r_tr.sub('', bytes)
-   if not last:
-      r = r_redirect.search(bytes[:4096])
-      if r:
-         term = urllib.unquote(r.group(1))
-         return wikipedia(term, language=language, last=True)
-   paragraphs = r_paragraph.findall(bytes)
-   if not paragraphs:
-      if not last:
-         term = search(term)
-         return wikipedia(term, language=language, last=True)
+def wikipedia(term, last=False):
+   global wikiapi, wikiuri
+   url = wikiapi % term
+   bytes = web.get(url)
+   result = json.loads(bytes)
+   result = result['query']['search']
+   if len(result) <= 0:
       return None
-   # Pre-process
-   paragraphs = [para for para in paragraphs
-      if (para and 'technical limitations' not in para
-          and 'window.showTocToggle' not in para
-          and 'Deletion_policy' not in para
-          and 'Template:AfD_footer' not in para
-          and not (para.startswith('<p><i>') and
-                   para.endswith('</i></p>'))
-          and not 'disambiguation)"' in para
-          and not '(images and media)' in para
-          and not 'This article contains a' in para
-          and not 'id="coordinates"' in para
-          and not 'class="thumb' in para)]
-#         and not 'style="display:none"' in para]
-   for i, para in enumerate(paragraphs):
-      para = para.replace('<sup>', '|')
-      para = para.replace('</sup>', '|')
-      paragraphs[i] = text(para).strip()
-   # Post-process
-   paragraphs = [para for para in paragraphs if
-      (para and not (para.endswith(':') and len(para) < 150))]
-   para = text(paragraphs[0])
-   m = r_sentence.match(para)
-   if not m:
-      if not last:
-         term = search(term)
-         return wikipedia(term, language=language, last=True)
-      return None
-   sentence = m.group(0)
-   maxlength = 275
-   if len(sentence) > maxlength:
-      sentence = sentence[:maxlength]
-      words = sentence[:-5].split(' ')
-      words.pop()
-      sentence = ' '.join(words) + ' [...]'
-   if (('using the Article Wizard if you wish' in sentence)
-       or ('or add a request for it' in sentence)
-       or ('in existing articles' in sentence)):
-      if not last:
-         term = search(term)
-         return wikipedia(term, language=language, last=True)
-      return None
-   sentence = '"' + sentence.replace('"', "'") + '"'
-   sentence = sentence.decode('utf-8').encode('utf-8')
-   wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-   term = term.decode('utf-8').encode('utf-8')
-   return sentence + ' - ' + (wikiuri % (language, term))
+   term = result[0]['title']
+   term = term.replace(' ', '_')
+   snippet = text(result[0]['snippet'])
+   return "%s - %s" % (snippet, wikiuri % term)
 def wik(phenny, input):
    origterm = input.groups()[1]
@@ -147,19 +64,12 @@ def wik(phenny, input):
    origterm = origterm.encode('utf-8')
    term = urllib.unquote(origterm)
-   language = 'en'
-   if term.startswith(':') and (' ' in term):
-      a, b = term.split(' ', 1)
-      a = a.lstrip(':')
-      if a.isalpha():
-         language, term = a, b
    term = term[0].upper() + term[1:]
    term = term.replace(' ', '_')
-   try: result = wikipedia(term, language)
+   try: result = wikipedia(term)
    except IOError:
-      args = (language, wikiuri % (language, term))
-      error = "Can't connect to %s.wikipedia.org (%s)" % args
+      error = "Can't connect to en.wikipedia.org (%s)" % (wikiuri % term)
       return phenny.say(error)
    if result is not None: