diff --git a/modules/commit.py b/modules/commit.py
index e2ce0d2..8f4593f 100644
--- a/modules/commit.py
+++ b/modules/commit.py
@@ -6,6 +6,7 @@ author: mutantmonkey

 from urllib.error import HTTPError
 import web
+from tools import GrumbleError

 def commit(phenny, input):
     """.commit - Get a What the Commit commit message."""
@@ -13,8 +14,8 @@ def commit(phenny, input):
     try:
         msg = web.get("http://whatthecommit.com/index.txt")
     except (HTTPError, IOError, ValueError):
-        phenny.reply("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
+
     phenny.reply(msg)

 commit.commands = ['commit']
diff --git a/modules/fcc.py b/modules/fcc.py
index 6dce81a..af5d73b 100644
--- a/modules/fcc.py
+++ b/modules/fcc.py
@@ -5,6 +5,7 @@ author: mutantmonkey
 """

 from urllib.error import HTTPError
+from tools import GrumbleError
 import web
 import json
@@ -20,8 +21,7 @@ def fcc(phenny, input):
         req = web.get("http://callook.info/{0}/json".format(web.quote(callsign)))
         data = json.loads(req)
     except (HTTPError, IOError, ValueError):
-        phenny.say("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")

     if len(data) <= 0 or data['status'] == 'INVALID':
         phenny.reply('No results found for {0}'.format(callsign))
diff --git a/modules/hs.py b/modules/hs.py
index a093dc0..586275f 100644
--- a/modules/hs.py
+++ b/modules/hs.py
@@ -4,6 +4,7 @@ hs.py - hokie stalker module
 author: mutantmonkey
 """

+from tools import GrumbleError
 import web
 import lxml.etree
@@ -18,8 +19,7 @@ def search(query):
     try:
         req = web.get(SEARCH_URL.format(query))
     except (HTTPError, IOError):
-        phenny.say("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")

     xml = lxml.etree.fromstring(req.encode('utf-8'))
     results = xml.findall('{0}searchResponse/{0}searchResultEntry'.format(NS))
diff --git a/modules/imdb.py b/modules/imdb.py
index f0e34b2..ad8c2a9 100644
--- a/modules/imdb.py
+++ b/modules/imdb.py
@@ -25,8 +25,8 @@ def imdb(phenny, input):
     if not query:
         return phenny.reply('.imdb what?')
     m = imdb_search(query)
-    try:
-        phenny.reply('{0} ({1}): {2} http://imdb.com/title/{3}'.format(m['Title'], m['Year'], m['Plot'], m['imdbID']))
+    try:
+        phenny.reply('{0} ({1}): {2} http://imdb.com/title/{3}'.format(m['Title'], m['Year'], m['Plot'], m['imdbID']))
     except:
-        phenny.reply("No results found for '%s'." % query)
+        phenny.reply("No results found for '%s'." % query)
 imdb.commands = ['imdb']
diff --git a/modules/linx.py b/modules/linx.py
index 8bb2bd9..bbf712a 100644
--- a/modules/linx.py
+++ b/modules/linx.py
@@ -5,6 +5,7 @@ author: mutantmonkey, andreim
 author: mutantmonkey
 """

-import random
-
 from urllib.error import HTTPError
+from tools import GrumbleError
 import web
 import lxml.html

+
 def fml(phenny, input):
     """.fml"""
     try:
         req = web.get("http://www.fmylife.com/random")
     except (HTTPError, IOError):
-        phenny.say("I tried to use .fml, but it was broken. FML")
-        return
+        raise GrumbleError("I tried to use .fml, but it was broken. FML")

     doc = lxml.html.fromstring(req)
     quote = doc.find_class('article')[0][0].text_content()
     phenny.say(quote)
 fml.commands = ['fml']

+
 def mlia(phenny, input):
     """.mlia - My life is average."""
     try:
         req = web.get("http://mylifeisaverage.com/")
     except (HTTPError, IOError):
-        phenny.say("I tried to use .mlia, but it wasn't loading. MLIA")
-        return
+        raise GrumbleError("I tried to use .mlia, but it wasn't loading. MLIA")

     doc = lxml.html.fromstring(req)
     quote = doc.find_class('story')[0][0].text_content()
@@ -38,70 +37,71 @@ def mlia(phenny, input):
     phenny.say(quote)
 mlia.commands = ['mlia']

+
 def mlib(phenny, input):
     """.mlib - My life is bro."""
     try:
         req = web.get("http://mylifeisbro.com/random")
     except (HTTPError, IOError):
-        phenny.say("MLIB is out getting a case of Natty. It's chill.")
-        return
+        raise GrumbleError("MLIB is out getting a case of Natty. It's chill.")

     doc = lxml.html.fromstring(req)
     quote = doc.find_class('storycontent')[0][0].text_content()
     phenny.say(quote)
 mlib.commands = ['mlib']

+
 def mlig(phenny, input):
     """.mlig - My life is ginger."""
     try:
         req = web.get("http://www.mylifeisginger.org/random")
     except (HTTPError, IOError):
-        phenny.say("Busy eating your soul. Be back soon.")
-        return
+        raise GrumbleError("Busy eating your soul. Be back soon.")

     doc = lxml.html.fromstring(req)
     quote = doc.find_class('oldlink')[0].text_content()
     phenny.say(quote)
 mlig.commands = ['mlig']

+
 def mlih(phenny, input):
     """.mlih - My life is ho."""
     try:
         req = web.get("http://mylifeisho.com/random")
     except (HTTPError, IOError):
-        phenny.say("MLIH is giving some dome to some lax bros.")
-        return
+        raise GrumbleError("MLIH is giving some dome to some lax bros.")

     doc = lxml.html.fromstring(req)
     quote = doc.find_class('storycontent')[0][0].text_content()
     phenny.say(quote)
 mlih.commands = ['mlih']

+
 def mlihp(phenny, input):
     """.mlihp - My life is Harry Potter."""
     try:
         req = web.get("http://www.mylifeishp.com/random")
     except (HTTPError, IOError):
-        phenny.say("This service is not available to Muggles.")
-        return
+        raise GrumbleError("This service is not available to Muggles.")

     doc = lxml.html.fromstring(req)
     quote = doc.find_class('oldlink')[0].text_content()
     phenny.say(quote)
 mlihp.commands = ['mlihp']

+
 def mlit(phenny, input):
     """.mlit - My life is Twilight."""
     try:
         req = web.get("http://mylifeistwilight.com/random")
     except (HTTPError, IOError):
-        phenny.say("Error: Your life is too Twilight. Go outside.")
-        return
+        raise GrumbleError("Error: Your life is too Twilight. Go outside.")

     doc = lxml.html.fromstring(req)
     quote = doc.find_class('fmllink')[0].text_content()
     phenny.say(quote)
 mlit.commands = ['mlit']

+
 if __name__ == '__main__':
     print(__doc__.strip())
diff --git a/modules/node-todo.py b/modules/node-todo.py
index d110adb..b8263f5 100644
--- a/modules/node-todo.py
+++ b/modules/node-todo.py
@@ -7,6 +7,7 @@ author: telnoratti

 from urllib.error import HTTPError
 from urllib import request
+from tools import GrumbleError
 import web
 import json
@@ -24,9 +25,8 @@ def xss(phenny, input):
     try:
         url = urlshortener(url)
     except (HTTPError, IOError):
-        phenny.reply("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
-
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
+
     phenny.reply(url)

 xss.rule = (['xss'], r'(.*)')
diff --git a/modules/randomreddit.py b/modules/randomreddit.py
index 255c742..385cb1e 100644
--- a/modules/randomreddit.py
+++ b/modules/randomreddit.py
@@ -7,45 +7,46 @@ author: andreim
 import web
 import re
 import json
+from tools import GrumbleError
 from random import choice

+
 def randomreddit(phenny, input):
-    subreddit = input.group(2)
-    if not subreddit:
-        phenny.say(".random - get a random link from the subreddit's frontpage")
-        return
-
-    if not re.match('^[A-Za-z0-9_-]*$',subreddit):
-        phenny.say(input.nick + ": bad subreddit format.")
-        return
+    subreddit = input.group(2)
+    if not subreddit:
+        phenny.say(".random - get a random link from the subreddit's frontpage")
+        return
+
+    if not re.match('^[A-Za-z0-9_-]*$',subreddit):
+        phenny.say(input.nick + ": bad subreddit format.")
+        return

-    url = "http://www.reddit.com/r/" + subreddit + "/.json"
-    try:
-        resp = web.get(url)
-    except:
-        try:
-            resp = web.get(url)
-        except:
-            try:
-                resp = web.get(url)
-            except:
-                phenny.reply('Reddit or subreddit unreachable.')
-                return
-
-    reddit = json.loads(resp)
-    post = choice(reddit['data']['children'])
+    url = "http://www.reddit.com/r/" + subreddit + "/.json"
+    try:
+        resp = web.get(url)
+    except:
+        try:
+            resp = web.get(url)
+        except:
+            try:
+                resp = web.get(url)
+            except:
+                raise GrumbleError('Reddit or subreddit unreachable.')
+
+    reddit = json.loads(resp)
+    post = choice(reddit['data']['children'])

-    nsfw = False
-    if post['data']['over_18']:
-        nsfw = True
-
-    if nsfw:
-        phenny.reply("!!NSFW!! " + post['data']['url'] + " (" + post['data']['title'] + ") !!NSFW!!")
-    else:
-        phenny.reply(post['data']['url'] + " (" + post['data']['title'] + ")")
+    nsfw = False
+    if post['data']['over_18']:
+        nsfw = True
+
+    if nsfw:
+        phenny.reply("!!NSFW!! " + post['data']['url'] + " (" + post['data']['title'] + ") !!NSFW!!")
+    else:
+        phenny.reply(post['data']['url'] + " (" + post['data']['title'] + ")")

 randomreddit.commands = ['random']
 randomreddit.priority = 'medium'
-randomreddit.thread = False
\ No newline at end of file
+randomreddit.thread = False
diff --git a/modules/rule34.py b/modules/rule34.py
index 690cb5d..9c94af6 100644
--- a/modules/rule34.py
+++ b/modules/rule34.py
@@ -6,6 +6,7 @@ author: mutantmonkey

 from urllib.parse import quote as urlquote
 from urllib.error import HTTPError
+from tools import GrumbleError
 import web
 import lxml.html
@@ -20,8 +21,7 @@ def rule34(phenny, input):
     try:
         req = web.get("http://rule34.xxx/index.php?page=post&s=list&tags={0}".format(urlquote(q)))
     except (HTTPError, IOError):
-        phenny.say("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")

     doc = lxml.html.fromstring(req)
     doc.make_links_absolute('http://rule34.xxx/')
@@ -33,8 +33,7 @@ def rule34(phenny, input):
     try:
         link = thumbs[0].find('a').attrib['href']
     except AttributeError:
-        phenny.say("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")

     response = '!!NSFW!! -> {0} <- !!NSFW!!'.format(link)
     phenny.reply(response)
diff --git a/modules/short.py b/modules/short.py
index d5202ca..c41663c 100644
--- a/modules/short.py
+++ b/modules/short.py
@@ -5,6 +5,7 @@ author: andreim
 """

 from urllib.error import HTTPError
+from tools import GrumbleError
 import web
 import json
@@ -19,8 +20,7 @@ def short(phenny, input):
     try:
         req = web.post("http://vtlu.ug/vtluug", {'lurl': url})
     except (HTTPError, IOError):
-        phenny.reply("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")

     phenny.reply(req)
 short.rule = (['short'], r'(.*)')
diff --git a/modules/slogan.py b/modules/slogan.py
index d09c10e..37768fb 100644
--- a/modules/slogan.py
+++ b/modules/slogan.py
@@ -6,6 +6,7 @@ Copyright (c) 2011 Dafydd Crosby - http://www.dafyddcrosby.com
 Licensed under the Eiffel Forum License 2.
 """

+from tools import GrumbleError
 import re
 import web
@@ -29,8 +30,7 @@ def slogan(phenny, input):
     slogan = remove_tags.sub('', slogan)
     if not slogan:
-        phenny.say("Looks like an issue with sloganizer.net")
-        return
+        raise GrumbleError("Looks like an issue with sloganizer.net")

     phenny.say(slogan)
 slogan.commands = ['slogan']
 slogan.example = '.slogan Granola'
diff --git a/modules/tfw.py b/modules/tfw.py
index 68b5fce..89f3b6d 100644
--- a/modules/tfw.py
+++ b/modules/tfw.py
@@ -7,6 +7,7 @@ author: mutantmonkey

 from urllib.parse import quote as urlquote
 from urllib.error import HTTPError
+from tools import GrumbleError
 import web
 import lxml.html
 import lxml.cssselect
@@ -31,8 +32,7 @@ def tfw(phenny, input, fahrenheit=False, celsius=False):
     try:
         req = web.get("http://thefuckingweather.com/?where={0}{1}".format(urlquote(where), celsius_param))
     except (HTTPError, IOError):
-        phenny.say("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-        return
+        raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")

     doc = lxml.html.fromstring(req)
diff --git a/modules/urbandict.py b/modules/urbandict.py
index 5368eef..3e7d051 100644
--- a/modules/urbandict.py
+++ b/modules/urbandict.py
@@ -6,6 +6,7 @@ author: mutantmonkey

 import urllib.request
 from urllib.error import HTTPError
+from tools import GrumbleError
 import web
 import json
@@ -30,8 +31,8 @@ def urbandict(phenny, input):
         data = req.read().decode('utf-8')
         data = json.loads(data)
     except (HTTPError, IOError, ValueError):
-        phenny.say("Urban Dictionary slemped out on me. Try again in a minute.")
-        return
+        raise GrumbleError(
+            "Urban Dictionary slemped out on me. Try again in a minute.")

     if data['result_type'] == 'no_results':
         phenny.say("No results found for {0}".format(word))
diff --git a/modules/weather.py b/modules/weather.py
index cf5cd08..58e0fe5 100644
--- a/modules/weather.py
+++ b/modules/weather.py
@@ -9,7 +9,7 @@ http://inamidst.com/phenny/

 import re, urllib.request, urllib.parse, urllib.error
 import web
-from tools import deprecated
+from tools import deprecated, GrumbleError

 r_from = re.compile(r'(?i)([+-]\d+):00 from')
@@ -29,9 +29,6 @@ def location(name):
     lng = results['geonames'][0]['lng']
     return name, countryName, lat, lng

-class GrumbleError(object):
-    pass
-
 def local(icao, hour, minute):
     uri = ('http://www.flightstats.com/' +
         'go/Airport/airportDetails.do?airportCode=%s')
diff --git a/modules/wuvt.py b/modules/wuvt.py
index 1d7ba47..3e8c564 100644
--- a/modules/wuvt.py
+++ b/modules/wuvt.py
@@ -7,6 +7,7 @@ http://github.com/randynobx/phenny/
 """

 from urllib.error import URLError, HTTPError
+from tools import GrumbleError
 import re
 import web
@@ -19,7 +20,7 @@ def wuvt(phenny, input) :
         playing = web.get('http://www.wuvt.vt.edu/playlists/latest_track.php')
         djpage = web.get('http://www.wuvt.vt.edu/playlists/current_dj.php')
     except (URLError, HTTPError):
-        return phenny.reply('Cannot connect to wuvt')
+        raise GrumbleError('Cannot connect to wuvt')
     play= r_play.search(playing)
     song = play.group(1)
     artist = play.group(2)
diff --git a/tools.py b/tools.py
index 78bd5f9..a7cc45d 100755
--- a/tools.py
+++ b/tools.py
@@ -7,6 +7,11 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """

+
+class GrumbleError(Exception):
+    pass
+
+
 def deprecated(old):
     def new(phenny, input, old=old):
         self = phenny