Migrate modules using urllib2 to use phenny's web module

master
mutantmonkey 2011-09-21 20:43:05 -04:00
parent c8fe22190c
commit f34695717d
4 changed files with 44 additions and 51 deletions

View File

@ -129,14 +129,7 @@ def gettitle(uri):
try:
redirects = 0
while True:
headers = {
'Accept': 'text/html',
'User-Agent': 'Mozilla/5.0 (Phenny)'
}
req = urllib2.Request(uri, headers=headers)
u = urllib2.urlopen(req)
info = u.info()
u.close()
info = web.head(uri)
if not isinstance(info, list):
status = '200'
@ -157,9 +150,9 @@ def gettitle(uri):
if not (('/html' in mtype) or ('/xhtml' in mtype)):
return None
u = urllib2.urlopen(req)
bytes = u.read(262144)
u.close()
bytes = web.get(uri)
#bytes = u.read(262144)
#u.close()
except IOError:
return

View File

@ -8,32 +8,33 @@ author: mutantmonkey <mutantmonkey@mutantmonkey.in>
import random
from urllib import quote as urlquote
from urllib2 import urlopen, HTTPError
from urllib2 import HTTPError
import web
import lxml.html
def fml(phenny, input):
    """.fml - Say a random quote from fmylife.com."""
    # Post-migration version: web.get replaces urllib2.urlopen; the
    # superseded urlopen/lxml.html.parse lines left in the diff are dropped.
    try:
        req = web.get("http://www.fmylife.com/random")
    except HTTPError:
        phenny.say("I tried to use .fml, but it was broken. FML")
        return
    # web.get returns the page body as a string, so parse with fromstring
    # (no .getroot() needed, unlike lxml.html.parse on a file object).
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('article')[0][0].text_content()
    phenny.say(quote)
fml.commands = ['fml']
def mlia(phenny, input):
    """.mlia - My life is average."""
    # Fetch the page body as a string via phenny's web module; the old
    # urllib2.urlopen / lxml.html.parse lines from the diff are removed.
    try:
        req = web.get("http://mylifeisaverage.com/")
    except HTTPError:
        phenny.say("I tried to use .mlia, but it wasn't loading. MLIA")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('story')[0][0].text_content()
    # Site markup pads the story text; trim before saying it.
    quote = quote.strip()
    phenny.say(quote)
mlia.commands = ['mlia']
@ -41,14 +42,14 @@ mlia.commands = ['mlia']
def mliarab(phenny, input):
    """.mliarab - My life is Arabic."""
    # Post-migration version using web.get; the stale urlopen/parse lines
    # from the stripped diff are removed.
    try:
        req = web.get("http://mylifeisarabic.com/random/")
    except HTTPError:
        phenny.say("The site you requested, mylifeisarabic.com, has been banned \
in the UAE. You will be reported to appropriate authorities")
        return
    doc = lxml.html.fromstring(req)
    # The random page lists several entries; pick one at random.
    quotes = doc.find_class('entry')
    quote = random.choice(quotes)[0].text_content()
    quote = quote.strip()
    phenny.say(quote)
mliarab.commands = ['mliar', 'mliarab']
@ -57,26 +58,26 @@ mliarab.commands = ['mliar', 'mliarab']
def mlib(phenny, input):
    """.mlib - My life is bro."""
    # web.get returns the body as a string; superseded urllib2 lines from
    # the stripped diff are dropped.
    try:
        req = web.get("http://mylifeisbro.com/random")
    except HTTPError:
        phenny.say("MLIB is out getting a case of Natty. It's chill.")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('storycontent')[0][0].text_content()
    phenny.say(quote)
mlib.commands = ['mlib']
def mlic(phenny, input):
    """.mlic - My life is creepy."""
    # Post-migration version using web.get; stale urlopen/parse lines from
    # the stripped diff are removed.
    try:
        req = web.get("http://mylifeiscreepy.com/random")
    except HTTPError:
        phenny.say("Error: Have you checked behind you?")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('oldlink')[0].text_content()
    quote = quote.strip()
    phenny.say(quote)
mlic.commands = ['mlic']
@ -84,65 +85,65 @@ mlic.commands = ['mlic']
def mlid(phenny, input):
    """.mlid - My life is Desi."""
    # Docstring fixed: it previously read ".mlib" while the registered
    # command (mlid.commands below) is .mlid.
    # Post-migration version using web.get; stale urllib2 diff lines removed.
    try:
        req = web.get("http://www.mylifeisdesi.com/random")
    except HTTPError:
        phenny.say("MLID is busy at the hookah lounge, be back soon.")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('oldlink')[0].text_content()
    phenny.say(quote)
mlid.commands = ['mlid']
def mlig(phenny, input):
    """.mlig - My life is ginger."""
    # Post-migration version using web.get; stale urlopen/parse lines from
    # the stripped diff are removed.
    try:
        req = web.get("http://www.mylifeisginger.org/random")
    except HTTPError:
        phenny.say("Busy eating your soul. Be back soon.")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('oldlink')[0].text_content()
    phenny.say(quote)
mlig.commands = ['mlig']
def mlih(phenny, input):
    """.mlih - My life is ho."""
    # Post-migration version using web.get; stale urllib2 diff lines removed.
    try:
        req = web.get("http://mylifeisho.com/random")
    except HTTPError:
        phenny.say("MLIH is giving some dome to some lax bros.")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('storycontent')[0][0].text_content()
    phenny.say(quote)
mlih.commands = ['mlih']
def mlihp(phenny, input):
    """.mlihp - My life is Harry Potter."""
    # Post-migration version using web.get; stale urlopen/parse lines from
    # the stripped diff are removed.
    try:
        req = web.get("http://www.mylifeishp.com/random")
    except HTTPError:
        phenny.say("This service is not available to Muggles.")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('oldlink')[0].text_content()
    phenny.say(quote)
mlihp.commands = ['mlihp']
def mlit(phenny, input):
    """.mlit - My life is Twilight."""
    # Post-migration version using web.get; stale urllib2 diff lines removed.
    # NOTE(review): HTTPError here is urllib2's — confirm phenny's web.get
    # actually raises it rather than its own error type.
    try:
        req = web.get("http://mylifeistwilight.com/random")
    except HTTPError:
        phenny.say("Error: Your life is too Twilight. Go outside.")
        return
    doc = lxml.html.fromstring(req)
    quote = doc.find_class('fmllink')[0].text_content()
    phenny.say(quote)
mlit.commands = ['mlit']

View File

@ -5,7 +5,8 @@ author: mutantmonkey <mutantmonkey@gmail.com>
"""
from urllib import quote as urlquote
from urllib2 import urlopen, HTTPError
from urllib2 import HTTPError
import web
import lxml.html
def tfw(phenny, input, fahrenheit=False, celsius=False):
@ -22,17 +23,17 @@ def tfw(phenny, input, fahrenheit=False, celsius=False):
celsius_param = "&CELSIUS=yes"
try:
req = urlopen("http://thefuckingweather.com/?zipcode=%s%s" % (urlquote(zipcode), celsius_param))
req = web.get("http://thefuckingweather.com/?zipcode=%s%s" % (urlquote(zipcode), celsius_param))
except HTTPError:
phenny.say("THE INTERNET IS FUCKING BROKEN. Please try again later.")
return
doc = lxml.html.parse(req)
doc = lxml.html.fromstring(req)
location = doc.getroot().find_class('small')[0].text_content()
location = doc.find_class('small')[0].text_content()
try:
weather = doc.getroot().get_element_by_id('content')
weather = doc.get_element_by_id('content')
except KeyError:
phenny.say("Unknown location")
return

View File

@ -17,10 +17,8 @@ def location(name):
name = urllib.quote(name.encode('utf-8'))
uri = 'http://ws.geonames.org/searchJSON?q=%s&maxRows=1' % name
for i in xrange(10):
u = urllib.urlopen(uri)
if u is not None: break
bytes = u.read()
u.close()
bytes = web.get(uri)
if bytes is not None: break
results = web.json(bytes)
try: name = results['geonames'][0]['name']