switch to requests for HTTP queries
parent 259c222623
commit 5cb88f3cf8

@@ -12,6 +12,11 @@ Compatibility with existing phenny modules has been mostly retained, but they
 will need to be updated to run on Python3 if they do not already. All of the
 core modules have been ported.
 
+Requirements
+------------
+* Python 3.2+
+* [python-requests](http://docs.python-requests.org/en/latest/)
+
 Installation
 ------------
 1. Run `./phenny` - this creates a default config file

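Note for module authors: the practical effect of the new python-requests dependency is that HTTP calls now go through thin wrappers in web.py instead of raw urllib. A minimal sketch of the new calling convention (illustrative only; the actual wrappers are in the web.py diff at the bottom of this commit):

    import web

    # get() returns the decoded body as a str and raises on HTTP errors,
    # so there is no more manual read()/decode()/close() dance.
    html = web.get('http://example.com/')

    # head() returns the response headers as a dict-like mapping.
    info = web.head('http://example.com/')
    print(info['Date'])
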
@@ -10,7 +10,8 @@ modified from Wikipedia module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
+import web
 import wiki
 
 wikiapi = 'https://wiki.archlinux.org/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'

@@ -23,7 +24,7 @@ def awik(phenny, input):
     if not origterm:
         return phenny.say('Perhaps you meant ".awik dwm"?')
 
-    term = urllib.parse.unquote(origterm)
+    term = web.unquote(origterm)
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 

@@ -31,7 +32,7 @@ def awik(phenny, input):
 
     try:
         result = w.search(term)
-    except IOError:
+    except web.ConnectionError:
         error = "Can't connect to wiki.archlinux.org ({0})".format(wikiuri.format(term))
         return phenny.say(error)
 

@@ -7,7 +7,14 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, math, time, urllib.request, urllib.parse, urllib.error, locale, socket, struct, datetime
+import re
+import math
+import time
+import locale
+import socket
+import struct
+import datetime
+import web
 from decimal import Decimal as dec
 from tools import deprecated
 

@@ -273,9 +280,7 @@ yi.priority = 'low'
 
 def tock(phenny, input):
     """Shows the time from the USNO's atomic clock."""
-    u = urllib.request.urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl')
-    info = u.info()
-    u.close()
+    info = web.head('http://tycho.usno.navy.mil/cgi-bin/timer.pl')
     phenny.say('"' + info['Date'] + '" - tycho.usno.navy.mil')
 tock.commands = ['tock']
 tock.priority = 'high'

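The tock() rewrite works because requests exposes response headers as a case-insensitive, dict-like mapping, so the urlopen/info/close triple collapses into one call. A standalone sketch using requests directly, with the same URL as the module (which may no longer be live):

    import requests

    r = requests.head('http://tycho.usno.navy.mil/cgi-bin/timer.pl')
    r.raise_for_status()
    # r.headers is case-insensitive, so 'Date' and 'date' both work.
    print(r.headers['Date'])
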
@@ -4,16 +4,16 @@ commit.py - what the commit
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 import web
 from tools import GrumbleError
 
 
 def commit(phenny, input):
     """.commit - Get a What the Commit commit message."""
 
     try:
         msg = web.get("http://whatthecommit.com/index.txt")
-    except (HTTPError, IOError, ValueError):
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     phenny.reply(msg)

@@ -4,7 +4,6 @@ fcc.py - fcc callsign lookup
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json

@@ -20,7 +19,7 @@ def fcc(phenny, input):
     try:
         req = web.get("http://callook.info/{0}/json".format(web.quote(callsign)))
         data = json.loads(req)
-    except (HTTPError, IOError, ValueError):
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     if len(data) <= 0 or data['status'] == 'INVALID':

@@ -4,7 +4,6 @@ foodforus.py - foodforus module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import hashlib
 import json

@@ -31,7 +30,7 @@ def food(phenny, input):
     try:
         req = web.get(API_URL + '/food/' + web.quote(key.strip()))
         data = json.loads(req)
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Uh oh, I couldn't contact foodforus. HOW WILL WE "\
                 "EAT NOW‽")
 

@@ -66,8 +65,8 @@ def foodvote(phenny, input):
 
     try:
         req = web.post(API_URL + '/vote', postdata)
-        data = json.loads(req)
-    except (HTTPError, IOError):
+        data = json.loads(req.text)
+    except:
         raise GrumbleError("Uh oh, I couldn't contact foodforus. HOW WILL WE "\
                 "EAT NOW‽")
 

@@ -83,7 +82,7 @@ def pickfood(phenny, input):
     try:
         req = web.get(API_URL + '/food/' + web.quote(key.strip()))
         data = json.loads(req)
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Uh oh, I couldn't contact foodforus. HOW WILL WE "\
                 "EAT NOW‽")
 

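One wrinkle in the foodvote hunk above: the POST path now parses req.text, while the GET paths parse req directly. With plain requests, the form-POST-then-JSON round trip looks like the sketch below (the endpoint and fields are placeholders, not the module's real API):

    import json
    import requests

    API_URL = 'http://example.com/api'  # placeholder endpoint

    postdata = {'key': 'lunch', 'food': 'tacos'}  # placeholder fields
    r = requests.post(API_URL + '/vote', data=postdata)
    r.raise_for_status()
    data = json.loads(r.text)  # equivalent shortcut: data = r.json()
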
@@ -8,20 +8,19 @@ http://inamidst.com/phenny/
 """
 
 import re
-import urllib.request
+#import urllib.request
 import urllib.parse
 import urllib.error
-import http.client
-import http.cookiejar
+#import http.client
+#import http.cookiejar
 import time
 from html.entities import name2codepoint
 import web
 from tools import deprecated
 from modules.linx import get_title as linx_gettitle
 
-cj = http.cookiejar.LWPCookieJar()
-opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
-urllib.request.install_opener(opener)
+#cj = http.cookiejar.LWPCookieJar()
+#opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
+#urllib.request.install_opener(opener)
 
 
 def head(phenny, input):

@@ -47,11 +46,9 @@ def head(phenny, input):
     try:
         info = web.head(uri)
         info['status'] = '200'
-    except urllib.error.HTTPError as e:
+    except web.HTTPError as e:
         return phenny.say(str(e.code))
-    except http.client.InvalidURL:
-        return phenny.say("Not a valid URI, sorry.")
-    except IOError:
+    except web.ConnectionError:
         return phenny.say("Can't connect to %s" % uri)
 
     resptime = time.time() - start

@@ -159,7 +156,7 @@ def gettitle(phenny, uri):
         #bytes = u.read(262144)
         #u.close()
 
-    except IOError:
+    except web.ConnectionError:
         return
 
     m = r_title.search(bytes)

@@ -17,8 +17,8 @@ NS = NS = '{urn:oasis:names:tc:DSML:2:0:core}'
 def search(query):
     query = web.quote(query)
     try:
-        req = web.get(SEARCH_URL.format(query))
-    except (HTTPError, IOError):
+        req = web.get(SEARCH_URL.format(query), verify=False)
+    except (web.ConnectionError, web.HTTPError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     xml = lxml.etree.fromstring(req.encode('utf-8'))

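The added verify=False is the one behavioral surprise in this file: unlike urllib, requests validates TLS certificates by default and refuses connections it cannot verify (for example, a self-signed certificate on the directory server). A sketch with plain requests (the URL is a placeholder):

    import requests

    # Default behavior: certificate validation is on, and an unverifiable
    # cert raises requests.exceptions.SSLError.
    # verify=False skips validation; newer requests/urllib3 versions emit
    # an InsecureRequestWarning when you do this.
    r = requests.get('https://self-signed.example.com/', verify=False)
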
@@ -6,12 +6,9 @@ author: Casey Link <unnamedrambler@gmail.com>
 """
 
 import random
 
-import configparser, os
-import http.client
-from urllib.parse import quote as urlquote
-from urllib.request import urlopen
-from urllib.error import HTTPError
+import configparser
+import os
+import web
 from lxml import etree
 from datetime import datetime

@@ -85,19 +82,15 @@ def now_playing(phenny, input):
         user = arg
     user = user.strip()
     try:
-        req = urlopen("%smethod=user.getrecenttracks&user=%s" % (APIURL, urlquote(user)))
-    except HTTPError as e:
-        if e.code == 400:
+        req = web.get("%smethod=user.getrecenttracks&user=%s" % (APIURL, web.quote(user)))
+    except web.HTTPError as e:
+        if e.response.status_code == 400:
             phenny.say("%s doesn't exist on last.fm, perhaps they need to set user" % (user))
             return
         else:
             phenny.say("uhoh. try again later, mmkay?")
             return
-    except http.client.BadStatusLine:
-        phenny.say("uhoh. try again later, mmkay?")
-        return
-    doc = etree.parse(req)
-    root = doc.getroot()
+    root = etree.fromstring(req.encode('utf-8'))
     recenttracks = list(root)
     if len(recenttracks) == 0:
         phenny.say("%s hasn't played anything recently. this isn't you? try lastfm-set" % (user))

@@ -155,16 +148,15 @@ def tasteometer(phenny, input):
     if not user2:
         user2 = input.nick
     try:
-        req = urlopen("%smethod=tasteometer.compare&type1=user&type2=user&value1=%s&value2=%s" % (APIURL, urlquote(user1), urlquote(user2)))
-    except (HTTPError, http.client.BadStatusLine) as e:
-        if e.code == 400:
+        req = web.get("%smethod=tasteometer.compare&type1=user&type2=user&value1=%s&value2=%s" % (APIURL, web.quote(user1), web.quote(user2)))
+    except web.HTTPError as e:
+        if e.response.status_code == 400:
             phenny.say("uhoh, someone doesn't exist on last.fm, perhaps they need to set user")
             return
         else:
             phenny.say("uhoh. try again later, mmkay?")
             return
-    doc = etree.parse(req)
-    root = doc.getroot()
+    root = etree.fromstring(req.encode('utf-8'))
     score = root.xpath('comparison/result/score')
     if len(score) == 0:
         phenny.say("something isn't right. have those users scrobbled?")

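The status-code checks change shape here: urllib's HTTPError carried the code as e.code, while the exception requests raises from raise_for_status() carries the whole failed Response, hence e.response.status_code. A sketch with plain requests (the last.fm URL and error code are illustrative):

    import requests

    try:
        r = requests.get('http://ws.audioscrobbler.com/2.0/'
                         '?method=user.getrecenttracks&user=nosuchuser')
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # The failed Response rides along on the exception.
        if e.response.status_code == 400:
            print('no such user')
        else:
            print('try again later')
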
@@ -5,7 +5,6 @@ author: andreim <andreim@andreim.net>
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json

@@ -29,7 +28,7 @@ def linx(phenny, input, short=False):
 
     try:
         req = web.post("https://linx.li/vtluug", {'url': url, 'short': short, 'api_key': phenny.config.linx_api_key})
-    except (HTTPError, IOError):
+    except (web.HTTPError, web.ConnectionError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     data = json.loads(req)

@@ -71,7 +70,7 @@ def lines(phenny, input):
 
     try:
         req = web.post("https://linx.li/vtluuglines", {'nickname': nickname, 'date': date, 'sender': input.nick, 'channel': input.sender, 'api_key': phenny.config.linx_api_key})
-    except (HTTPError, IOError):
+    except (web.HTTPError, web.ConnectionError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     phenny.reply(req)

@@ -89,7 +88,7 @@ def posted(phenny, input):
 
     try:
         req = web.post("https://linx.li/vtluugposted", {'message': message, 'sender': input.nick, 'channel': input.sender, 'api_key': phenny.config.linx_api_key})
-    except (HTTPError, IOError):
+    except (web.HTTPError, web.ConnectionError):
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     phenny.reply(req)

@@ -5,7 +5,6 @@ author: Ramblurr <unnamedrambler@gmail.com>
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import lxml.html

@@ -15,7 +14,7 @@ def fml(phenny, input):
     """.fml"""
     try:
         req = web.get("http://www.fmylife.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("I tried to use .fml, but it was broken. FML")
 
     doc = lxml.html.fromstring(req)

@@ -28,7 +27,7 @@ def mlia(phenny, input):
     """.mlia - My life is average."""
     try:
         req = web.get("http://mylifeisaverage.com/")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("I tried to use .mlia, but it wasn't loading. MLIA")
 
     doc = lxml.html.fromstring(req)

@@ -42,7 +41,7 @@ def mlib(phenny, input):
     """.mlib - My life is bro."""
     try:
         req = web.get("http://mylifeisbro.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("MLIB is out getting a case of Natty. It's chill.")
 
     doc = lxml.html.fromstring(req)

@@ -55,7 +54,7 @@ def mlig(phenny, input):
     """.mlig - My life is ginger."""
     try:
         req = web.get("http://www.mylifeisginger.org/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Busy eating your soul. Be back soon.")
 
     doc = lxml.html.fromstring(req)

@@ -68,7 +67,7 @@ def mlih(phenny, input):
     """.mlih - My life is ho."""
     try:
         req = web.get("http://mylifeisho.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("MLIH is giving some dome to some lax bros.")
 
     doc = lxml.html.fromstring(req)

@@ -81,7 +80,7 @@ def mlihp(phenny, input):
     """.mlihp - My life is Harry Potter."""
     try:
         req = web.get("http://www.mylifeishp.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("This service is not available to Muggles.")
 
     doc = lxml.html.fromstring(req)

@@ -94,7 +93,7 @@ def mlit(phenny, input):
     """.mlit - My life is Twilight."""
     try:
         req = web.get("http://mylifeistwilight.com/random")
-    except (HTTPError, IOError):
+    except:
         raise GrumbleError("Error: Your life is too Twilight. Go outside.")
 
     doc = lxml.html.fromstring(req)

@@ -7,7 +7,7 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
+import web
 
 definitions = 'https://github.com/nslater/oblique/wiki'

@@ -30,9 +30,9 @@ def mappings(uri):
 
 def service(phenny, input, command, args):
     t = o.services[command]
-    template = t.replace('${args}', urllib.parse.quote(args, ''))
-    template = template.replace('${nick}', urllib.parse.quote(input.nick, ''))
-    uri = template.replace('${sender}', urllib.parse.quote(input.sender, ''))
+    template = t.replace('${args}', web.quote(args, ''))
+    template = template.replace('${nick}', web.quote(input.nick, ''))
+    uri = template.replace('${sender}', web.quote(input.sender, ''))
 
     info = web.head(uri)
     if isinstance(info, list):

@@ -104,7 +104,7 @@ def snippet(phenny, input):
     if not o.services:
         refresh(phenny)
 
-    search = urllib.parse.quote(input.group(2))
+    search = web.quote(input.group(2))
     py = "BeautifulSoup.BeautifulSoup(re.sub('<.*?>|(?<= ) +', '', " + \
         "''.join(chr(ord(c)) for c in " + \
         "eval(urllib.urlopen('http://ajax.googleapis.com/ajax/serv" + \

@@ -4,8 +4,6 @@ rule34.py - rule 34 module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-from urllib.parse import quote as urlquote
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import lxml.html

@@ -19,8 +17,8 @@ def rule34(phenny, input):
         return
 
     try:
-        req = web.get("http://rule34.xxx/index.php?page=post&s=list&tags={0}".format(urlquote(q)))
-    except (HTTPError, IOError):
+        req = web.get("http://rule34.xxx/index.php?page=post&s=list&tags={0}".format(web.quote(q)))
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
     doc = lxml.html.fromstring(req)

@@ -10,24 +10,13 @@ http://inamidst.com/phenny/
 import re
 import web
 
-class Grab(web.urllib.request.URLopener):
-    def __init__(self, *args):
-        self.version = 'Mozilla/5.0 (Phenny)'
-        web.urllib.request.URLopener.__init__(self, *args)
-        self.addheader('Referer', 'https://github.com/sbp/phenny')
-    def http_error_default(self, url, fp, errcode, errmsg, headers):
-        return web.urllib.addinfourl(fp, [headers, errcode], "http:" + url)
-
 def google_ajax(query):
     """Search using AjaxSearch, and return its JSON."""
     if isinstance(query, str):
         query = query.encode('utf-8')
     uri = 'http://ajax.googleapis.com/ajax/services/search/web'
     args = '?v=1.0&safe=off&q=' + web.quote(query)
-    handler = web.urllib.request._urlopener
-    web.urllib.request._urlopener = Grab()
-    bytes = web.get(uri + args)
-    web.urllib.request._urlopener = handler
+    bytes = web.get(uri + args, headers={'Referer': 'https://github.com/sbp/phenny'})
    return web.json(bytes)
 
 def google_search(query):

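The deleted Grab class existed only to inject a User-Agent and Referer by monkey-patching urllib's global opener; with requests those become plain per-call arguments, which is what the new headers= parameter of web.get() passes through. A sketch with plain requests:

    import requests

    headers = {
        'User-Agent': 'Mozilla/5.0 (Phenny)',
        'Referer': 'https://github.com/sbp/phenny',
    }
    # Per-request headers replace the global URLopener monkey-patching.
    r = requests.get('http://ajax.googleapis.com/ajax/services/search/web',
                     params={'v': '1.0', 'safe': 'off', 'q': 'phenny'},
                     headers=headers)
    r.raise_for_status()
    data = r.json()
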
@@ -4,11 +4,11 @@ short.py - vtluug url shortner
 author: andreim <andreim@andreim.net>
 """
 
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json
 
 
 def short(phenny, input):
     """.short <url> - Shorten a URL."""

@@ -18,11 +18,11 @@ def short(phenny, input):
         return
 
     try:
-        req = web.post("http://vtlu.ug/vtluug", {'lurl': url})
-    except (HTTPError, IOError):
+        r = web.post("http://vtlu.ug/vtluug", {'lurl': url})
+    except:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
 
-    phenny.reply(req)
+    phenny.reply(r)
 short.rule = (['short'], r'(.*)')
 
 if __name__ == '__main__':

@@ -7,7 +7,6 @@ author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 
 from tools import GrumbleError
 from modules import weather
-import urllib.error
 import random
 import metar
 import web

@@ -31,7 +30,7 @@ def tfw(phenny, input, fahrenheit=False, celsius=False):
         bytes = web.get(uri % icao_code)
     except AttributeError:
         raise GrumbleError("THE INTERNET IS FUCKING BROKEN. Please try again later.")
-    except urllib.error.HTTPError:
+    except web.HTTPError:
         phenny.say("WHERE THE FUCK IS THAT? Try another location.")
         return
 

@@ -8,7 +8,7 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
+import json
+import web
 

@@ -18,20 +18,19 @@ def translate(text, input='auto', output='en'):
         output = output[:-4]
         raw = True
 
-    opener = urllib.request.build_opener()
-    opener.addheaders = [(
-        'User-Agent', 'Mozilla/5.0' +
-        '(X11; U; Linux i686)' +
-        'Gecko/20071127 Firefox/2.0.0.11'
-    )]
-    input = urllib.parse.quote(input)
-    output = urllib.parse.quote(output.encode('utf-8'))
-    text = urllib.parse.quote(text.encode('utf-8'))
+    #opener = urllib.request.build_opener()
+    #opener.addheaders = [(
+    #    'User-Agent', 'Mozilla/5.0' +
+    #    '(X11; U; Linux i686)' +
+    #    'Gecko/20071127 Firefox/2.0.0.11'
+    #)]
+    input = web.quote(input)
+    output = web.quote(output.encode('utf-8'))
+    text = web.quote(text.encode('utf-8'))
 
-    result = opener.open('http://translate.google.com/translate_a/t?' +
+    result = web.get('http://translate.google.com/translate_a/t?' +
         ('client=t&hl=en&sl=%s&tl=%s&multires=1' % (input, output)) +
-        ('&otf=1&ssel=0&tsel=0&uptl=en&sc=1&text=%s' % text)).read()
-    result = result.decode('utf-8')
+        ('&otf=1&ssel=0&tsel=0&uptl=en&sc=1&text=%s' % text))
 
     while ',,' in result:
         result = result.replace(',,', ',null,')

@@ -4,12 +4,11 @@ urbandict.py - urban dictionary module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-import urllib.request
-from urllib.error import HTTPError
 from tools import GrumbleError
 import web
 import json
 
 
 def urbandict(phenny, input):
     """.urb <word> - Search Urban Dictionary for a definition."""

@@ -19,27 +18,28 @@ def urbandict(phenny, input):
         return
 
-    # create opener
-    opener = urllib.request.build_opener()
-    opener.addheaders = [
-        ('User-agent', web.Grab().version),
-        ('Referer', "http://m.urbandictionary.com"),
-    ]
+    #opener = urllib.request.build_opener()
+    #opener.addheaders = [
+    #    ('User-agent', web.Grab().version),
+    #    ('Referer', "http://m.urbandictionary.com"),
+    #]
 
     try:
-        req = opener.open("http://api.urbandictionary.com/v0/define?term={0}"
-                .format(web.quote(word)))
-        data = req.read().decode('utf-8')
+        data = web.get(
+            "http://api.urbandictionary.com/v0/define?term={0}".format(
+                web.quote(word)))
         data = json.loads(data)
-    except (HTTPError, IOError, ValueError):
+    except:
         raise GrumbleError(
-                "Urban Dictionary slemped out on me. Try again in a minute.")
+            "Urban Dictionary slemped out on me. Try again in a minute.")
 
     if data['result_type'] == 'no_results':
         phenny.say("No results found for {0}".format(word))
         return
 
     result = data['list'][0]
-    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(web.quote(word))
+    url = 'http://www.urbandictionary.com/define.php?term={0}'.format(
+        web.quote(word))
 
     response = "{0} - {1}".format(result['definition'].strip()[:256], url)
     phenny.say(response)

@@ -10,7 +10,8 @@ modified from Wikipedia module
 author: mutantmonkey <mutantmonkey@mutantmonkey.in>
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
+import web
 import wiki
 
 wikiapi = 'https://vtluug.org/w/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'

@@ -23,7 +24,7 @@ def vtluug(phenny, input):
     if not origterm:
         return phenny.say('Perhaps you meant ".vtluug VT-Wireless"?')
 
-    term = urllib.parse.unquote(origterm)
+    term = web.unquote(origterm)
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 

@@ -31,7 +32,7 @@ def vtluug(phenny, input):
 
     try:
         result = w.search(term)
-    except IOError:
+    except web.ConnectionError:
         error = "Can't connect to vtluug.org ({0})".format(wikiuri.format(term))
         return phenny.say(error)
 

@@ -7,7 +7,7 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
+import re
 import metar
+import web
 from tools import deprecated, GrumbleError

@@ -15,7 +15,7 @@ from tools import deprecated, GrumbleError
 r_from = re.compile(r'(?i)([+-]\d+):00 from')
 
 def location(name):
-    name = urllib.parse.quote(name)
+    name = web.quote(name)
     uri = 'http://ws.geonames.org/searchJSON?q=%s&maxRows=1' % name
     for i in range(10):
         bytes = web.get(uri)

@@ -81,7 +81,7 @@ def f_weather(phenny, input):
         bytes = web.get(uri % icao_code)
     except AttributeError:
         raise GrumbleError('OH CRAP NOAA HAS GONE DOWN THE WEB IS BROKEN')
-    except urllib.error.HTTPError:
+    except web.HTTPError:
         phenny.say("No NOAA data available for that location.")
         return
 

@@ -7,7 +7,8 @@ Licensed under the Eiffel Forum License 2.
 http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error, gzip, io
+import re
+import web
 import wiki
 
 wikiapi = 'https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'

@@ -20,7 +21,7 @@ def wik(phenny, input):
     if not origterm:
         return phenny.say('Perhaps you meant ".wik Zen"?')
 
-    term = urllib.parse.unquote(origterm)
+    term = web.unquote(origterm)
     term = term[0].upper() + term[1:]
     term = term.replace(' ', '_')
 

@@ -28,7 +29,7 @@ def wik(phenny, input):
 
     try:
         result = w.search(term)
-    except IOError:
+    except web.ConnectionError:
         error = "Can't connect to en.wikipedia.org ({0})".format(wikiuri.format(term))
         return phenny.say(error)
 

@@ -6,7 +6,6 @@ Copyright 2012, Randy Nance, randynance.info
 http://github.com/randynobx/phenny/
 """
 
-from urllib.error import URLError, HTTPError
 from tools import GrumbleError
 import re
 import web

@@ -19,7 +18,7 @@ def wuvt(phenny, input) :
     try:
         playing = web.get('http://www.wuvt.vt.edu/playlists/latest_track.php')
         djpage = web.get('http://www.wuvt.vt.edu/playlists/current_dj.php')
-    except (URLError, HTTPError):
+    except:
         raise GrumbleError('Cannot connect to wuvt')
     play= r_play.search(playing)
     song = play.group(2)

web.py

@@ -5,50 +5,41 @@ Author: Sean B. Palmer, inamidst.com
 About: http://inamidst.com/phenny/
 """
 
-import re, urllib.request, urllib.parse, urllib.error
-from html.entities import name2codepoint
+import re
+import urllib.parse
+import requests
 import json as jsonlib
-
-class Grab(urllib.request.URLopener):
-    def __init__(self, *args):
-        self.version = 'Mozilla/5.0 (Phenny)'
-        urllib.request.URLopener.__init__(self, *args)
-    def http_error_default(self, url, fp, errcode, errmsg, headers):
-        return urllib.addinfourl(fp, [headers, errcode], "http:" + url)
-urllib.request._urlopener = Grab()
+from requests.exceptions import ConnectionError, HTTPError, InvalidURL
+from html.entities import name2codepoint
+from urllib.parse import quote, unquote
 
-def get(uri):
+user_agent = "Mozilla/5.0 (Phenny)"
+default_headers = {'User-Agent': user_agent}
+
+def get(uri, headers={}, verify=True, **kwargs):
     if not uri.startswith('http'):
         return
-    u = urllib.request.urlopen(uri)
-    bytes = u.read()
-    try:
-        bytes = bytes.decode('utf-8')
-    except UnicodeDecodeError:
-        bytes = bytes.decode('ISO-8859-1')
-    u.close()
-    return bytes
+    headers.update(default_headers)
+    r = requests.get(uri, headers=headers, verify=verify, **kwargs)
+    r.raise_for_status()
+    return r.text
 
-def head(uri):
+def head(uri, headers={}, verify=True, **kwargs):
     if not uri.startswith('http'):
         return
-    u = urllib.request.urlopen(uri)
-    info = u.info()
-    u.close()
-    return info
+    headers.update(default_headers)
+    r = requests.head(uri, headers=headers, verify=verify, **kwargs)
+    r.raise_for_status()
+    return r.headers
 
-def post(uri, query):
+def post(uri, data, headers={}, verify=True, **kwargs):
     if not uri.startswith('http'):
         return
-    data = urllib.parse.urlencode(query).encode('utf-8')
-    u = urllib.request.urlopen(uri, data)
-    bytes = u.read()
-    try:
-        bytes = bytes.decode('utf-8')
-    except UnicodeDecodeError:
-        bytes = bytes.decode('ISO-8859-1')
-    u.close()
-    return bytes
+    headers.update(default_headers)
+    r = requests.post(uri, data=data, headers=headers, verify=verify, **kwargs)
+    r.raise_for_status()
+    return r.text
 
 r_entity = re.compile(r'&([^;\s]+);')
 

@@ -62,9 +53,6 @@ def entity(match):
         return chr(name2codepoint[value])
     return '[' + value + ']'
 
-def quote(text):
-    return urllib.parse.quote(text)
-
 def decode(html):
     return r_entity.sub(entity, html)

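Taken together, web.py becomes a thin facade over requests: each verb merges the caller's headers with a default User-Agent, raises on non-2xx responses, and returns the decoded body (or the headers mapping for head()), while re-exporting ConnectionError/HTTPError/InvalidURL so modules never import requests directly. From a module's point of view, a sketch using the exception names and the commit.py URL from this diff:

    import web

    try:
        body = web.get('http://whatthecommit.com/index.txt')
    except web.HTTPError as e:
        # re-exported requests.exceptions.HTTPError; the failed Response
        # is available as e.response
        print('server error:', e.response.status_code)
    except web.ConnectionError:
        # re-exported requests.exceptions.ConnectionError
        print("can't connect")
    else:
        print(body.strip())
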