diff --git a/modules/archwiki.py b/modules/archwiki.py
index d482b26..e954e09 100755
--- a/modules/archwiki.py
+++ b/modules/archwiki.py
@@ -12,10 +12,12 @@ author: mutantmonkey
 
 import re, urllib
 import web
+import json
 
+wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
 wikiuri = 'https://wiki.archlinux.org/index.php/%s'
 wikisearch = 'https://wiki.archlinux.org/index.php/Special:Search?' \
-   + 'search=%s&fulltext=Search'
+    + 'search=%s&fulltext=Search'
 
 r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
 r_content = re.compile(r'(?ims)<div id="content"[^>]*>\n.*?</div>')
@@ -23,153 +25,63 @@ r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
 r_tag = re.compile(r'<(?!!)[^>]+>')
 r_whitespace = re.compile(r'[\t\r\n ]+')
 r_redirect = re.compile(
-   r'(?ims)class=.redirectText.>\s*<a\s*href=./index.php/([^"/]+)')
+    r'(?ims)class=.redirectText.>\s*<a\s*href=./index.php/([^"/]+)')
 
 def unescape(s):
-   s = s.replace('&gt;', '>')
-   s = s.replace('&lt;', '<')
-   s = s.replace('&amp;', '&')
-   s = s.replace('&#160;', ' ')
-   return s
+    s = s.replace('&gt;', '>')
+    s = s.replace('&lt;', '<')
+    s = s.replace('&amp;', '&')
+    s = s.replace('&#160;', ' ')
+    return s
 
 def text(html):
-   html = r_tag.sub('', html)
-   html = r_whitespace.sub(' ', html)
-   return unescape(html).strip()
-
-def search(term):
-   try: import search
-   except ImportError, e:
-      print e
-      return term
-
-   if isinstance(term, unicode):
-      term = term.encode('utf-8')
-   else: term = term.decode('utf-8')
-
-   term = term.replace('_', ' ')
-   try: uri = search.result('site:wiki.archlinux.org %s' % term)
-   except IndexError: return term
-   if uri:
-      return uri[len('https://wiki.archlinux.org/index.php/'):]
-   else: return term
+    html = r_tag.sub('', html)
+    html = r_whitespace.sub(' ', html)
+    return unescape(html).strip()
 
 def archwiki(term, last=False):
-   global wikiuri
-   if not '%' in term:
-      if isinstance(term, unicode):
-         t = term.encode('utf-8')
-      else: t = term
-      q = urllib.quote(t)
-      u = wikiuri % q
-      bytes = web.get(u)
-   else: bytes = web.get(wikiuri % term)
-   bytes = r_tr.sub('', bytes)
-
-   if not last:
-      r = r_redirect.search(bytes[:4096])
-      if r:
-         term = urllib.unquote(r.group(1))
-         return archwiki(term, last=True)
-
-   # kind of hacky fix to deal with Arch wiki template, should be cleaned up a bit
-   content = r_content.findall(bytes)
-   if not content or len(content) < 1:
-      return None
-   paragraphs = r_paragraph.findall(content[0])
-   print paragraphs
-
-   if not paragraphs:
-      if not last:
-         term = search(term)
-         return archwiki(term, last=True)
-      return None
-
-   # Pre-process
-   paragraphs = [para for para in paragraphs
-      if (para and 'technical limitations' not in para
-                and 'window.showTocToggle' not in para
-                and 'Deletion_policy' not in para
-                and 'Template:AfD_footer' not in para
-                and not (para.startswith('<p><i>') and
-                  para.endswith('</i></p>'))
-                and not 'disambiguation)"' in para)
-                and not '(images and media)' in para
-                and not 'This article contains a' in para
-                and not 'id="coordinates"' in para
-                and not 'class="thumb' in para
-                and not 'There is currently no text in this page.' in para]
-         # and not 'style="display:none"' in para]
-
-   for i, para in enumerate(paragraphs):
-      para = para.replace('<sup>', '|')
-      para = para.replace('</sup>', '|')
-      paragraphs[i] = text(para).strip()
-
-   # Post-process
-   paragraphs = [para for para in paragraphs if
-      (para and not (para.endswith(':') and len(para) < 150))]
-
-   para = text(paragraphs[0])
-   m = r_sentence.match(para)
-
-   if not m:
-      if not last:
-         term = search(term)
-         return archwiki(term, last=True)
-      return None
-   sentence = m.group(0)
-
-   maxlength = 275
-   if len(sentence) > maxlength:
-      sentence = sentence[:maxlength]
-      words = sentence[:-5].split(' ')
-      words.pop()
-      sentence = ' '.join(words) + ' [...]'
-
-   if (('using the Article Wizard if you wish' in sentence)
-         or ('or add a request for it' in sentence)):
-      if not last:
-         term = search(term)
-         return archwiki(term, last=True)
-      return None
-
-   sentence = '"' + sentence.replace('"', "'") + '"'
-   sentence = sentence.decode('utf-8').encode('utf-8')
-   wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-   term = term.decode('utf-8').encode('utf-8')
-   return sentence + ' - ' + (wikiuri % term)
+    global wikiapi, wikiuri
+    url = wikiapi % term
+    bytes = web.get(url)
+    result = json.loads(bytes)
+    result = result['query']['search']
+    if len(result) <= 0:
+        return None
+    term = result[0]['title']
+    term = term.replace(' ', '_')
+    snippet = text(result[0]['snippet'])
+    return "%s - %s" % (snippet, wikiuri % term)
 
 def awik(phenny, input):
-   origterm = input.groups()[1]
-   if not origterm:
-      return phenny.say('Perhaps you meant ".awik dwm"?')
-   origterm = origterm.encode('utf-8')
+    origterm = input.groups()[1]
+    if not origterm:
+        return phenny.say('Perhaps you meant ".awik dwm"?')
+    origterm = origterm.encode('utf-8')
 
-   term = urllib.unquote(origterm)
-   term = term[0].upper() + term[1:]
-   term = term.replace(' ', '_')
+    term = urllib.unquote(origterm)
+    term = term[0].upper() + term[1:]
+    term = term.replace(' ', '_')
 
-   try: result = archwiki(term)
-   except IOError:
-      error = "Can't connect to wiki.archlinux.org (%s)" % (wikiuri % term)
-      return phenny.say(error)
+    try: result = archwiki(term)
+    except IOError:
+        error = "Can't connect to wiki.archlinux.org (%s)" % (wikiuri % term)
+        return phenny.say(error)
 
-   if result is not None:
-      phenny.say(result)
-   else: phenny.say('Can\'t find anything in the ArchWiki for "%s".' % origterm)
+    if result is not None:
+        phenny.say(result)
+    else: phenny.say('Can\'t find anything in the ArchWiki for "%s".' % origterm)
 
 awik.commands = ['awik']
 awik.priority = 'high'
 
 if __name__ == '__main__':
-   print __doc__.strip()
+    print __doc__.strip()
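
The rewrite above, and the two below, all switch to the same lookup path: query the MediaWiki search API, take the title and snippet of the first hit, and rebuild the article URL from the title. As a standalone sketch of that path using only the standard library in place of phenny's web.get() helper (the lookup() name and the urllib2 fetch are illustrative, not part of the patch):

    import json, urllib, urllib2

    wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
    wikiuri = 'https://wiki.archlinux.org/index.php/%s'

    def lookup(term):
        # percent-encode the term; note the patched archwiki() interpolates it raw
        data = urllib2.urlopen(wikiapi % urllib.quote(term)).read()
        results = json.loads(data)['query']['search']
        if not results:
            return None
        title = results[0]['title'].replace(' ', '_')
        return '%s - %s' % (results[0]['snippet'], wikiuri % title)

    print lookup('dwm')
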
diff --git a/modules/vtluugwiki.py b/modules/vtluugwiki.py
index f4cac2c..f739d2b 100755
--- a/modules/vtluugwiki.py
+++ b/modules/vtluugwiki.py
@@ -12,158 +12,75 @@ author: mutantmonkey
 
 import re, urllib
 import web
+import json
 
+wikiapi = 'https://vtluug.org/w/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
 wikiuri = 'https://vtluug.org/wiki/%s'
 wikisearch = 'https://vtluug.org/wiki/Special:Search?' \
-   + 'search=%s&fulltext=Search'
+    + 'search=%s&fulltext=Search'
 
 r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
 r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
 r_tag = re.compile(r'<(?!!)[^>]+>')
 r_whitespace = re.compile(r'[\t\r\n ]+')
 r_redirect = re.compile(
-   r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)')
+    r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)')
 
 def unescape(s):
-   s = s.replace('&gt;', '>')
-   s = s.replace('&lt;', '<')
-   s = s.replace('&amp;', '&')
-   s = s.replace('&#160;', ' ')
-   return s
+    s = s.replace('&gt;', '>')
+    s = s.replace('&lt;', '<')
+    s = s.replace('&amp;', '&')
+    s = s.replace('&#160;', ' ')
+    return s
 
 def text(html):
-   html = r_tag.sub('', html)
-   html = r_whitespace.sub(' ', html)
-   return unescape(html).strip()
-
-def search(term):
-   try: import search
-   except ImportError, e:
-      print e
-      return term
-
-   if isinstance(term, unicode):
-      term = term.encode('utf-8')
-   else: term = term.decode('utf-8')
-
-   term = term.replace('_', ' ')
-   try: uri = search.result('site:vtluug.org %s' % term)
-   except IndexError: return term
-   if uri:
-      return uri[len('http://vtluug.org/wiki/'):]
-   else: return term
+    html = r_tag.sub('', html)
+    html = r_whitespace.sub(' ', html)
+    return unescape(html).strip()
 
 def vtluugwiki(term, last=False):
-   global wikiuri
-   if not '%' in term:
-      if isinstance(term, unicode):
-         t = term.encode('utf-8')
-      else: t = term
-      q = urllib.quote(t)
-      u = wikiuri % q
-      bytes = web.get(u)
-   else: bytes = web.get(wikiuri % term)
-   bytes = r_tr.sub('', bytes)
-
-   if not last:
-      r = r_redirect.search(bytes[:4096])
-      if r:
-         term = urllib.unquote(r.group(1))
-         return vtluugwiki(term, last=True)
-
-   paragraphs = r_paragraph.findall(bytes)
-
-   if not paragraphs:
-      if not last:
-         term = search(term)
-         return vtluugwiki(term, last=True)
-      return None
-
-   # Pre-process
-   paragraphs = [para for para in paragraphs
-      if (para and 'technical limitations' not in para
-                and 'window.showTocToggle' not in para
-                and 'Deletion_policy' not in para
-                and 'Template:AfD_footer' not in para
-                and not (para.startswith('<p><i>') and
-                  para.endswith('</i></p>'))
-                and not 'disambiguation)"' in para)
-                and not '(images and media)' in para
-                and not 'This article contains a' in para
-                and not 'id="coordinates"' in para
-                and not 'class="thumb' in para
-                and not 'There is currently no text in this page.' in para]
-         # and not 'style="display:none"' in para]
-
-   for i, para in enumerate(paragraphs):
-      para = para.replace('<sup>', '|')
-      para = para.replace('</sup>', '|')
-      paragraphs[i] = text(para).strip()
-
-   # Post-process
-   paragraphs = [para for para in paragraphs if
-      (para and not (para.endswith(':') and len(para) < 150))]
-
-   para = text(paragraphs[0])
-   m = r_sentence.match(para)
-
-   if not m:
-      if not last:
-         term = search(term)
-         return vtluugwiki(term, last=True)
-      return None
-   sentence = m.group(0)
-
-   maxlength = 275
-   if len(sentence) > maxlength:
-      sentence = sentence[:maxlength]
-      words = sentence[:-5].split(' ')
-      words.pop()
-      sentence = ' '.join(words) + ' [...]'
-
-   if (('using the Article Wizard if you wish' in sentence)
-         or ('or add a request for it' in sentence)):
-      if not last:
-         term = search(term)
-         return vtluugwiki(term, last=True)
-      return None
-
-   sentence = '"' + sentence.replace('"', "'") + '"'
-   sentence = sentence.decode('utf-8').encode('utf-8')
-   wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-   term = term.decode('utf-8').encode('utf-8')
-   return sentence + ' - ' + (wikiuri % term)
+    global wikiapi, wikiuri
+    url = wikiapi % term
+    bytes = web.get(url)
+    result = json.loads(bytes)
+    result = result['query']['search']
+    if len(result) <= 0:
+        return None
+    term = result[0]['title']
+    term = term.replace(' ', '_')
+    snippet = text(result[0]['snippet'])
+    return "%s - %s" % (snippet, wikiuri % term)
 
 def vtluug(phenny, input):
-   origterm = input.groups()[1]
-   if not origterm:
-      return phenny.say('Perhaps you meant ".vtluug Zen"?')
-   origterm = origterm.encode('utf-8')
+    origterm = input.groups()[1]
+    if not origterm:
+        return phenny.say('Perhaps you meant ".vtluug Zen"?')
+    origterm = origterm.encode('utf-8')
 
-   term = urllib.unquote(origterm)
-   term = term[0].upper() + term[1:]
-   term = term.replace(' ', '_')
+    term = urllib.unquote(origterm)
+    term = term[0].upper() + term[1:]
+    term = term.replace(' ', '_')
 
-   try: result = vtluugwiki(term)
-   except IOError:
-      error = "Can't connect to vtluug.org (%s)" % (wikiuri % term)
-      return phenny.say(error)
+    try: result = vtluugwiki(term)
+    except IOError:
+        error = "Can't connect to vtluug.org (%s)" % (wikiuri % term)
+        return phenny.say(error)
 
-   if result is not None:
-      phenny.say(result)
-   else: phenny.say('Can\'t find anything in the VTLUUG Wiki for "%s".' % origterm)
+    if result is not None:
+        phenny.say(result)
+    else: phenny.say('Can\'t find anything in the VTLUUG Wiki for "%s".' % origterm)
 
 vtluug.commands = ['vtluug']
 vtluug.priority = 'high'
 
 if __name__ == '__main__':
-   print __doc__.strip()
+    print __doc__.strip()
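
Both modules above keep unescape() and text() because of what the API hands back: results arrive as {"query": {"search": [{"title": ..., "snippet": ...}]}}, and MediaWiki wraps each matched word of the snippet in <span class="searchmatch"> markup with the rest HTML-escaped. A small demonstration of the cleanup using the same regexes the modules define (the snippet literal here is hypothetical):

    import re

    r_tag = re.compile(r'<(?!!)[^>]+>')
    r_whitespace = re.compile(r'[\t\r\n ]+')

    snippet = '<span class="searchmatch">Zen</span> is a school of Mahayana Buddhism'
    print r_whitespace.sub(' ', r_tag.sub('', snippet)).strip()
    # prints: Zen is a school of Mahayana Buddhism
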
diff --git a/modules/wikipedia.py b/modules/wikipedia.py
index b476ba3..510707a 100755
--- a/modules/wikipedia.py
+++ b/modules/wikipedia.py
@@ -9,165 +9,75 @@ http://inamidst.com/phenny/
 
 import re, urllib
 import web
+import json
 
-wikiuri = 'http://%s.wikipedia.org/wiki/%s'
-# wikisearch = 'http://%s.wikipedia.org/wiki/Special:Search?' \
-#    + 'search=%s&fulltext=Search'
+wikiapi = 'http://en.wikipedia.org/w/api.php?action=query&list=search&srsearch=%s&limit=1&prop=snippet&format=json'
+wikiuri = 'http://en.wikipedia.org/wiki/%s'
+wikisearch = 'http://en.wikipedia.org/wiki/Special:Search?' \
+    + 'search=%s&fulltext=Search'
 
 r_tr = re.compile(r'(?ims)<tr[^>]*>.*?</tr>')
 r_paragraph = re.compile(r'(?ims)<p[^>]*>.*?</p>|<li(?!n)[^>]*>.*?</li>')
 r_tag = re.compile(r'<(?!!)[^>]+>')
 r_whitespace = re.compile(r'[\t\r\n ]+')
 r_redirect = re.compile(
-   r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)')
+    r'(?ims)class=.redirectText.>\s*<a\s*href=./wiki/([^"/]+)')
 
 def unescape(s):
-   s = s.replace('&gt;', '>')
-   s = s.replace('&lt;', '<')
-   s = s.replace('&amp;', '&')
-   s = s.replace('&#160;', ' ')
-   return s
+    s = s.replace('&gt;', '>')
+    s = s.replace('&lt;', '<')
+    s = s.replace('&amp;', '&')
+    s = s.replace('&#160;', ' ')
+    return s
 
 def text(html):
-   html = r_tag.sub('', html)
-   html = r_whitespace.sub(' ', html)
-   return unescape(html).strip()
+    html = r_tag.sub('', html)
+    html = r_whitespace.sub(' ', html)
+    return unescape(html).strip()
 
-def search(term):
-   try: import search
-   except ImportError, e:
-      print e
-      return term
-
-   if isinstance(term, unicode):
-      term = term.encode('utf-8')
-   else: term = term.decode('utf-8')
-
-   term = term.replace('_', ' ')
-   try: uri = search.result('site:en.wikipedia.org %s' % term)
-   except IndexError: return term
-   if uri:
-      return uri[len('http://en.wikipedia.org/wiki/'):]
-   else: return term
-
-def wikipedia(term, language='en', last=False):
-   global wikiuri
-   if not '%' in term:
-      if isinstance(term, unicode):
-         t = term.encode('utf-8')
-      else: t = term
-      q = urllib.quote(t)
-      u = wikiuri % (language, q)
-      bytes = web.get(u)
-   else: bytes = web.get(wikiuri % (language, term))
-   bytes = r_tr.sub('', bytes)
-
-   if not last:
-      r = r_redirect.search(bytes[:4096])
-      if r:
-         term = urllib.unquote(r.group(1))
-         return wikipedia(term, language=language, last=True)
-
-   paragraphs = r_paragraph.findall(bytes)
-
-   if not paragraphs:
-      if not last:
-         term = search(term)
-         return wikipedia(term, language=language, last=True)
-      return None
-
-   # Pre-process
-   paragraphs = [para for para in paragraphs
-      if (para and 'technical limitations' not in para
-                and 'window.showTocToggle' not in para
-                and 'Deletion_policy' not in para
-                and 'Template:AfD_footer' not in para
-                and not (para.startswith('<p><i>') and
-                  para.endswith('</i></p>'))
-                and not 'disambiguation)"' in para)
-                and not '(images and media)' in para
-                and not 'This article contains a' in para
-                and not 'id="coordinates"' in para
-                and not 'class="thumb' in para]
-         # and not 'style="display:none"' in para]
-
-   for i, para in enumerate(paragraphs):
-      para = para.replace('<sup>', '|')
-      para = para.replace('</sup>', '|')
-      paragraphs[i] = text(para).strip()
-
-   # Post-process
-   paragraphs = [para for para in paragraphs if
-      (para and not (para.endswith(':') and len(para) < 150))]
-
-   para = text(paragraphs[0])
-   m = r_sentence.match(para)
-
-   if not m:
-      if not last:
-         term = search(term)
-         return wikipedia(term, language=language, last=True)
-      return None
-   sentence = m.group(0)
-
-   maxlength = 275
-   if len(sentence) > maxlength:
-      sentence = sentence[:maxlength]
-      words = sentence[:-5].split(' ')
-      words.pop()
-      sentence = ' '.join(words) + ' [...]'
-
-   if (('using the Article Wizard if you wish' in sentence)
-         or ('or add a request for it' in sentence)
-         or ('in existing articles' in sentence)):
-      if not last:
-         term = search(term)
-         return wikipedia(term, language=language, last=True)
-      return None
-
-   sentence = '"' + sentence.replace('"', "'") + '"'
-   sentence = sentence.decode('utf-8').encode('utf-8')
-   wikiuri = wikiuri.decode('utf-8').encode('utf-8')
-   term = term.decode('utf-8').encode('utf-8')
-   return sentence + ' - ' + (wikiuri % (language, term))
+def wikipedia(term, last=False):
+    global wikiapi, wikiuri
+    url = wikiapi % term
+    bytes = web.get(url)
+    result = json.loads(bytes)
+    result = result['query']['search']
+    if len(result) <= 0:
+        return None
+    term = result[0]['title']
+    term = term.replace(' ', '_')
+    snippet = text(result[0]['snippet'])
+    return "%s - %s" % (snippet, wikiuri % term)
 
 def wik(phenny, input):
-   origterm = input.groups()[1]
-   if not origterm:
-      return phenny.say('Perhaps you meant ".wik Zen"?')
-   origterm = origterm.encode('utf-8')
+    origterm = input.groups()[1]
+    if not origterm:
+        return phenny.say('Perhaps you meant ".wik Zen"?')
+    origterm = origterm.encode('utf-8')
 
-   term = urllib.unquote(origterm)
-   language = 'en'
-   if term.startswith(':') and (' ' in term):
-      a, b = term.split(' ', 1)
-      a = a.lstrip(':')
-      if a.isalpha():
-         language, term = a, b
-   term = term[0].upper() + term[1:]
-   term = term.replace(' ', '_')
+    term = urllib.unquote(origterm)
+    term = term[0].upper() + term[1:]
+    term = term.replace(' ', '_')
 
-   try: result = wikipedia(term, language)
-   except IOError:
-      args = (language, wikiuri % (language, term))
-      error = "Can't connect to %s.wikipedia.org (%s)" % args
-      return phenny.say(error)
+    try: result = wikipedia(term)
+    except IOError:
+        error = "Can't connect to en.wikipedia.org (%s)" % (wikiuri % term)
+        return phenny.say(error)
 
-   if result is not None:
-      phenny.say(result)
-   else: phenny.say('Can\'t find anything in Wikipedia for "%s".' % origterm)
+    if result is not None:
+        phenny.say(result)
+    else: phenny.say('Can\'t find anything in Wikipedia for "%s".' % origterm)
 
 wik.commands = ['wik']
 wik.priority = 'high'
 
 if __name__ == '__main__':
-   print __doc__.strip()
+    print __doc__.strip()
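
After this patch the three modules differ only in their wikiapi/wikiuri constants and their user-facing strings, so the shared body could be factored into one helper. A possible extraction, sketch only: mediawiki_snippet() is not part of the patch, and it leans on the web.get() and text() helpers each module already imports or defines:

    import json

    def mediawiki_snippet(apiuri, articleuri, term):
        # first search hit for term, or None if the wiki returns nothing;
        # web.get() and text() come from the surrounding module
        result = json.loads(web.get(apiuri % term))['query']['search']
        if not result:
            return None
        title = result[0]['title'].replace(' ', '_')
        return "%s - %s" % (text(result[0]['snippet']), articleuri % title)

Each module-level function would then reduce to a one-line call such as mediawiki_snippet(wikiapi, wikiuri, term). Two behavioral notes that fall out of the diff itself: the ".wik :de Zen" language-prefix handling is gone, since wikiapi pins en.wikipedia.org, and r_redirect is kept in all three modules but is no longer used now that redirect-following has been dropped.
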